xref: /dragonfly/sys/dev/raid/ida/ida.c (revision 2020c8fe)
1 /*-
2  * Copyright (c) 1999,2000 Jonathan Lemon
3  * All rights reserved.
4  *
 * Derived from the original IDA Compaq RAID driver, which is
6  * Copyright (c) 1996, 1997, 1998, 1999
7  *    Mark Dawson and David James. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/ida/ida.c,v 1.7.2.3 2001/03/01 01:57:32 ps Exp $
31  */
32 
33 /*
34  * Generic driver for Compaq SMART RAID adapters.
35  *
36  * Specific probe routines are in:
37  *	dev/raid/ida/ida_pci.c
38  */
39 
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/bus.h>
48 #include <sys/devicestat.h>
49 #include <sys/disk.h>
50 #include <sys/rman.h>
51 #include <sys/buf2.h>
52 #include <sys/thread2.h>
53 
54 #include <machine/clock.h>
55 
56 #include "idareg.h"
57 #include "idavar.h"
58 
/* forward declarations for file-local helpers */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

DECLARE_DUMMY_MODULE(ida);
67 
/*
 * Release every resource acquired during initialization/attach, in
 * roughly the reverse order of allocation.  Each teardown is guarded,
 * so this is safe to call on a partially initialized softc (e.g. from
 * an attach failure path as well as from ida_detach()).
 */
void
ida_free(struct ida_softc *ida)
{
	int i;

	/* per-QCB DMA maps created by ida_alloc_qcb() */
	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	/* unload and free the permanently-mapped hardware QCB area */
	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	/* tags must go after the maps/memory that were created from them */
	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		kfree(ida->qcbs, M_DEVBUF);

	/* interrupt handler, then the IRQ resource it was attached to */
	if (ida->ih != NULL)
                bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	/* register window (resource type/id recorded by the bus probe) */
	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}
106 
107 /*
108  * record bus address from bus_dmamap_load
109  */
110 static void
111 ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
112 {
113         bus_addr_t *baddr;
114 
115         baddr = (bus_addr_t *)arg;
116         *baddr = segs->ds_addr;
117 }
118 
119 static __inline struct ida_qcb *
120 ida_get_qcb(struct ida_softc *ida)
121 {
122 	struct ida_qcb *qcb;
123 
124 	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
125 		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
126 	} else {
127 		ida_alloc_qcb(ida);
128 		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
129 			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
130 	}
131 	return (qcb);
132 }
133 
134 static __inline bus_addr_t
135 idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
136 {
137 	return (ida->hwqcb_busaddr +
138 	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
139 }
140 
141 static __inline struct ida_qcb *
142 idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
143 {
144 	struct ida_hardware_qcb *hwqcb;
145 
146 	hwqcb = (struct ida_hardware_qcb *)
147 	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
148 	return (hwqcb->qcb);
149 }
150 
/*
 * XXX
 * since we allocate all QCB space up front during initialization, then
 * why bother with this routine?
 */
/*
 * Carve the next QCB out of the preallocated qcbs/hwqcbs arrays, create
 * its buffer DMA map, and place it on the free list.  Silently does
 * nothing when the pool is full or the map cannot be created.
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	/* back-pointer lets completions map hardware QCB -> software QCB */
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}
178 
/*
 * One-time controller initialization: set up queues, allocate the QCB
 * array, create the DMA tags, and allocate + permanently map the
 * hardware QCB area.  Returns 0 on success or ENOMEM on failure; on
 * failure the caller is expected to release partial allocations via
 * ida_free().
 */
int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
        bioq_init(&ida->bio_queue);

	/* software QCBs; the hardware QCBs are DMA-allocated below */
	ida->qcbs = kmalloc(IDA_QCB_MAX * sizeof(struct ida_qcb),
			    M_DEVBUF, M_INTWAIT|M_ZERO);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, &ida->hwqcb_dmat);
	if (error)
                return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ida->buffer_dmat);
	if (error)
                return (ENOMEM);

        /* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void *)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
                return (ENOMEM);

        /* And permanently map them in */
	/* NOTE(review): load return value is unchecked — presumably the
	 * single-segment load always completes synchronously; verify. */
        bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}
238 
/*
 * Finish bringing the controller up: query controller info, optionally
 * start the onboard firmware, create one child device per logical
 * drive, then enable controller interrupts.  Interrupts stay disabled
 * while the synchronous setup commands are polled.
 */
void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	/* some boards need an explicit firmware start (IDA_FIRMWARE flag) */
	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	/* one anonymous child per logical drive; idad probes them */
	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}
277 
278 int
279 ida_detach(device_t dev)
280 {
281 	struct ida_softc *ida;
282 	int error = 0;
283 
284         ida = (struct ida_softc *)device_get_softc(dev);
285 
286 	/*
287 	 * XXX
288 	 * before detaching, we must make sure that the system is
289 	 * quiescent; nothing mounted, no pending activity.
290 	 */
291 
292 	/*
293 	 * XXX
294 	 * now, how are we supposed to maintain a list of our drives?
295 	 * iterate over our "child devices"?
296 	 */
297 
298 
299 	ida_free(ida);
300 	return (error);
301 }
302 
303 static void
304 ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
305 {
306 	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
307 	int i;
308 
309 	hwqcb->hdr.size = (sizeof(struct ida_req) +
310 	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2;
311 
312 	for (i = 0; i < nsegments; i++) {
313 		hwqcb->seg[i].addr = segs[i].ds_addr;
314 		hwqcb->seg[i].length = segs[i].ds_len;
315 	}
316 	hwqcb->req.sgcount = nsegments;
317 }
318 
319 int
320 ida_command(struct ida_softc *ida, int command, void *data, int datasize,
321 	int drive, u_int64_t pblkno, int flags)
322 {
323 	struct ida_hardware_qcb *hwqcb;
324 	struct ida_qcb *qcb;
325 	bus_dmasync_op_t op;
326 	int error;
327 
328 	crit_enter();
329 	qcb = ida_get_qcb(ida);
330 	crit_exit();
331 
332 	if (qcb == NULL) {
333 		kprintf("ida_command: out of QCBs");
334 		return (EAGAIN);
335 	}
336 
337 	hwqcb = qcb->hwqcb;
338 	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
339 
340 	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
341 	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
342 	op = qcb->flags & DMA_DATA_IN ?
343 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
344 	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
345 
346 	hwqcb->hdr.drive = drive;
347 	hwqcb->req.blkno = pblkno;
348 	hwqcb->req.bcount = howmany(datasize, DEV_BSIZE);
349 	hwqcb->req.command = command;
350 
351 	KKASSERT(pblkno < 0x100000000ULL);
352 
353 	qcb->flags = flags | IDA_COMMAND;
354 
355 	crit_enter();
356 	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
357 	ida_start(ida);
358 	error = ida_wait(ida, qcb);
359 	crit_exit();
360 
361 	/* XXX should have status returned here? */
362 	/* XXX have "status pointer" area in QCB? */
363 
364 	return (error);
365 }
366 
367 void
368 ida_submit_buf(struct ida_softc *ida, struct bio *bio)
369 {
370         bioqdisksort(&ida->bio_queue, bio);
371         ida_construct_qcb(ida);
372 	ida_start(ida);
373 }
374 
375 static void
376 ida_construct_qcb(struct ida_softc *ida)
377 {
378 	struct ida_hardware_qcb *hwqcb;
379 	struct ida_qcb *qcb;
380 	bus_dmasync_op_t op;
381 	struct buf *bp;
382 	struct bio *bio;
383 
384 	bio = bioq_first(&ida->bio_queue);
385 	if (bio == NULL)
386 		return;				/* no more buffers */
387 
388 	qcb = ida_get_qcb(ida);
389 	if (qcb == NULL)
390 		return;				/* out of resources */
391 
392 	bioq_remove(&ida->bio_queue, bio);
393 	qcb->bio = bio;
394 	qcb->flags = 0;
395 
396 	hwqcb = qcb->hwqcb;
397 	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
398 
399 	bp = bio->bio_buf;
400 	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
401 	    (void *)bp->b_data, bp->b_bcount, ida_setup_dmamap, hwqcb, 0);
402 	op = qcb->flags & DMA_DATA_IN ?
403 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
404 	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
405 
406 	{
407 		struct idad_softc *drv;
408 
409 		drv = (struct idad_softc *)bio->bio_driver_info;
410 		hwqcb->hdr.drive = drv->drive;
411 	}
412 
413 	hwqcb->req.blkno = bio->bio_offset >> DEV_BSHIFT;
414 	hwqcb->req.bcount = howmany(bp->b_bcount, DEV_BSIZE);
415 	hwqcb->req.command = (bp->b_cmd == BUF_CMD_READ) ? CMD_READ : CMD_WRITE;
416 
417 	KKASSERT(bio->bio_offset < 0x100000000ULL * DEV_BSIZE);
418 
419 	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
420 }
421 
422 /*
423  * This routine will be called from ida_intr in order to queue up more
424  * I/O, meaning that we may be in an interrupt context.  Hence, we should
425  * not muck around with spl() in this routine.
426  */
427 static void
428 ida_start(struct ida_softc *ida)
429 {
430 	struct ida_qcb *qcb;
431 
432 	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
433 		if (ida->cmd.fifo_full(ida))
434 			break;
435 		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
436 		/*
437 		 * XXX
438 		 * place the qcb on an active list and set a timeout?
439 		 */
440 		qcb->state = QCB_ACTIVE;
441 		ida->cmd.submit(ida, qcb);
442 	}
443 }
444 
445 static int
446 ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
447 {
448 	struct ida_qcb *qcb_done = NULL;
449 	bus_addr_t completed;
450 	int delay;
451 
452 	if (ida->flags & IDA_INTERRUPTS) {
453 		if (tsleep((caddr_t)qcb, 0, "idacmd", 5 * hz))
454 			return (ETIMEDOUT);
455 		return (0);
456 	}
457 
458 again:
459 	delay = 5 * 1000 * 100;			/* 5 sec delay */
460 	while ((completed = ida->cmd.done(ida)) == 0) {
461 		if (delay-- == 0)
462 			return (ETIMEDOUT);
463 		DELAY(10);
464 	}
465 
466 	qcb_done = idahwqcbptov(ida, completed & ~3);
467 	if (qcb_done != qcb)
468 		goto again;
469 	ida_done(ida, qcb);
470 	return (0);
471 }
472 
/*
 * Interrupt handler: drain all pending completions from the controller,
 * finish each corresponding QCB via ida_done(), then try to submit more
 * queued work.
 */
void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		/* low two bits of the completion value are masked off
		 * before translating back to our QCB */
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (uintmax_t)completed);
			continue;
		}
		ida_done(ida, qcb);
	}
	/* completions freed QCBs/FIFO slots; push more queued work */
	ida_start(ida);
}
497 
/*
 * should switch out command type; may be status, not just I/O.
 */
/*
 * Complete a QCB: post-DMA sync/unload, report controller errors, wake
 * a synchronous ida_command() waiter or finish the bio, then recycle
 * the QCB and try to build the next one from the bio queue.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	/* soft errors are logged only; hard/rejected fail the request */
	if (qcb->hwqcb->req.error & SOFT_ERROR)
		device_printf(ida->dev, "soft error\n");
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		/* synchronous ida_command(); waiter sleeps only when
		 * interrupts are enabled (see ida_wait()) */
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->bio->bio_buf->b_flags |= B_ERROR;
		idad_intr(qcb->bio);
	}

	/* recycle the QCB and attempt to queue more bio work */
	qcb->state = QCB_FREE;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}
542