/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *    Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ida/ida.c,v 1.7.2.3 2001/03/01 01:57:32 ps Exp $
 */

/*
 * Generic driver for Compaq SMART RAID adapters.
 *
 * Specific probe routines are in:
 *	pci/ida_pci.c
 *	i386/eisa/ida_eisa.c
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/rman.h>
#include <sys/buf2.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "idareg.h"
#include "idavar.h"

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

DECLARE_DUMMY_MODULE(ida);

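/*
 * Tear down everything set up for the controller: per-QCB DMA maps,
 * the hardware QCB memory and its mapping, the DMA tags, the interrupt
 * handler, and the bus resources.  Each item is checked before it is
 * released, so a partially initialized softc is handled safely.
 */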
void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		kfree(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

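/*
 * Pop a QCB from the free list, growing the pool on demand.  Returns
 * NULL if none is available and no more can be allocated.
 */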
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

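/*
 * Convert a hardware QCB pointer to its bus address, and a completed
 * bus address back to the driver QCB that owns it, using the base
 * address recorded when the QCB array was mapped.
 */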
static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * XXX
 * Since we allocate all QCB space up front during initialization,
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}

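/*
 * Set up the QCB pool and the DMA tags and mappings needed to talk to
 * the controller: one tag for the hardware QCB array (permanently
 * mapped) and one for per-request data buffers.
 */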
int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = kmalloc(IDA_QCB_MAX * sizeof(struct ida_qcb),
			    M_DEVBUF, M_INTWAIT|M_ZERO);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void *)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

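/*
 * Fetch the controller configuration, start the adapter firmware if
 * the IDA_FIRMWARE flag is set, and add a child device for each
 * reported logical drive before re-enabling interrupts.
 */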
void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}

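/*
 * Detach the controller and release its resources.  Per the XXX notes
 * below, all I/O must be quiesced before this runs.
 */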
int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error = 0;

	ida = (struct ida_softc *)device_get_softc(dev);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	ida_free(ida);
	return (error);
}

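/*
 * bus_dmamap_load() callback: fill in the scatter/gather list of the
 * hardware QCB from the DMA segments of the mapped buffer.
 */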
static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = (sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2;

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = segs[i].ds_addr;
		hwqcb->seg[i].length = segs[i].ds_len;
	}
	hwqcb->req.sgcount = nsegments;
}

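/*
 * Issue a single command to the controller and wait for it to
 * complete.  Used for configuration and control requests rather than
 * regular disk I/O, which goes through ida_submit_buf().
 */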
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int64_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int error;

	crit_enter();
	qcb = ida_get_qcb(ida);
	crit_exit();

	if (qcb == NULL) {
		kprintf("ida_command: out of QCBs\n");
		return (EAGAIN);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = (flags & DMA_DATA_IN) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = pblkno;
	hwqcb->req.bcount = howmany(datasize, DEV_BSIZE);
	hwqcb->req.command = command;

	KKASSERT(pblkno < 0x100000000ULL);

	qcb->flags = flags | IDA_COMMAND;

	crit_enter();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	crit_exit();

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}

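/*
 * Queue an incoming bio, construct a QCB for it if resources allow,
 * and kick the controller.
 */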
void
ida_submit_buf(struct ida_softc *ida, struct bio *bio)
{
	bioqdisksort(&ida->bio_queue, bio);
	ida_construct_qcb(ida);
	ida_start(ida);
}

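/*
 * Pull the next bio off the queue and translate it into a hardware
 * QCB.  If no bio is pending or no QCB is free, return quietly; an
 * unserviced bio stays on the queue for a later attempt.
 */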
static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct buf *bp;
	struct bio *bio;

	bio = bioq_first(&ida->bio_queue);
	if (bio == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bio);
	bp = bio->bio_buf;
	qcb->bio = bio;
	qcb->flags = (bp->b_cmd == BUF_CMD_READ) ? DMA_DATA_IN : DMA_DATA_OUT;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->b_data, bp->b_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	{
		struct idad_softc *drv;

		drv = (struct idad_softc *)bio->bio_driver_info;
		hwqcb->hdr.drive = drv->drive;
	}

	hwqcb->req.blkno = bio->bio_offset >> DEV_BSHIFT;
	hwqcb->req.bcount = howmany(bp->b_bcount, DEV_BSIZE);
	hwqcb->req.command = (bp->b_cmd == BUF_CMD_READ) ? CMD_READ : CMD_WRITE;

	KKASSERT(bio->bio_offset < 0x100000000ULL * DEV_BSIZE);

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

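/*
 * Wait for a specific QCB to complete.  With interrupts enabled we
 * sleep and let ida_intr() wake us; otherwise we poll the completion
 * FIFO until our QCB shows up.
 */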
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (ida->flags & IDA_INTERRUPTS) {
		if (tsleep((caddr_t)qcb, 0, "idacmd", 5 * hz))
			return (ETIMEDOUT);
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0)
			return (ETIMEDOUT);
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}

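/*
 * Interrupt handler: drain the completion FIFO, finish each completed
 * QCB, then try to submit more queued work.
 */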
void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (uintmax_t)completed);
			continue;
		}
		ida_done(ida, qcb);
	}
	ida_start(ida);
}

/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	if (qcb->hwqcb->req.error & SOFT_ERROR)
		device_printf(ida->dev, "soft error\n");
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->bio->bio_buf->b_flags |= B_ERROR;
		idad_intr(qcb->bio);
	}

	qcb->state = QCB_FREE;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}