/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *    Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ida/ida.c,v 1.7.2.3 2001/03/01 01:57:32 ps Exp $
 */

/*
 * Generic driver for Compaq SMART RAID adapters.
 *
 * Specific probe routines are in:
 *	dev/raid/ida/ida_pci.c
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/rman.h>
#include <sys/buf2.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "idareg.h"
#include "idavar.h"

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

DECLARE_DUMMY_MODULE(ida);

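/*
 * Release everything ida_init() and the bus front-end set up: per-QCB
 * DMA maps, the hardware QCB area and its DMA tags, the QCB array, the
 * interrupt handler, and the IRQ/register resources.
 */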
void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		kfree(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

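/*
 * Pop a QCB off the free list, allocating a fresh one if the list is
 * empty.  Returns NULL if no QCB is available.
 */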
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

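/*
 * convert a hardware QCB virtual address to its bus address
 */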
static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

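/*
 * convert a completed hardware QCB bus address back to its driver QCB
 */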
static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * XXX
 * Since we allocate all QCB space up front during initialization,
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}

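/*
 * One-time controller setup: initialize the queues, allocate the QCB
 * array, create the DMA tags, and allocate and permanently map the
 * hardware QCB area.
 */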
int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = kmalloc(IDA_QCB_MAX * sizeof(struct ida_qcb),
			    M_DEVBUF, M_INTWAIT|M_ZERO);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void *)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

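/*
 * Query the controller, optionally start its firmware, and attach one
 * child device per logical drive reported.
 */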
void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}

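/*
 * Detach the controller and release its resources; see the XXX notes
 * below about quiescing outstanding I/O first.
 */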
int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error = 0;

	ida = (struct ida_softc *)device_get_softc(dev);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	ida_free(ida);
	return (error);
}

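/*
 * bus_dmamap_load() callback: fill in the scatter/gather list and
 * segment count of the hardware QCB passed in arg.
 */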
static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = (sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2;

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = segs[i].ds_addr;
		hwqcb->seg[i].length = segs[i].ds_len;
	}
	hwqcb->req.sgcount = nsegments;
}

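/*
 * Issue a single controller command (configuration/control rather than
 * regular disk I/O) and wait for it to complete.
 */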
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int64_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int error;

	crit_enter();
	qcb = ida_get_qcb(ida);
	crit_exit();

	if (qcb == NULL) {
		kprintf("ida_command: out of QCBs\n");
		return (EAGAIN);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = pblkno;
	hwqcb->req.bcount = howmany(datasize, DEV_BSIZE);
	hwqcb->req.command = command;

	KKASSERT(pblkno < 0x100000000ULL);

	qcb->flags = flags | IDA_COMMAND;

	crit_enter();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	crit_exit();

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}

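/*
 * Accept a bio from the idad disk layer, queue it, and try to build
 * and submit a command for it.
 */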
void
ida_submit_buf(struct ida_softc *ida, struct bio *bio)
{
	bioqdisksort(&ida->bio_queue, bio);
	ida_construct_qcb(ida);
	ida_start(ida);
}

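/*
 * Build a QCB for the next queued bio, if any, and append it to the
 * qcb_queue for ida_start() to submit.
 */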
static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct buf *bp;
	struct bio *bio;

	bio = bioq_first(&ida->bio_queue);
	if (bio == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bio);
	qcb->bio = bio;
	qcb->flags = 0;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bp = bio->bio_buf;
	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->b_data, bp->b_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	{
		struct idad_softc *drv;

		drv = (struct idad_softc *)bio->bio_driver_info;
		hwqcb->hdr.drive = drv->drive;
	}

	hwqcb->req.blkno = bio->bio_offset >> DEV_BSHIFT;
	hwqcb->req.bcount = howmany(bp->b_bcount, DEV_BSIZE);
	hwqcb->req.command = (bp->b_cmd == BUF_CMD_READ) ? CMD_READ : CMD_WRITE;

	KKASSERT(bio->bio_offset < 0x100000000ULL * DEV_BSIZE);

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

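/*
 * Wait for the given QCB to complete: sleep if interrupts are enabled,
 * otherwise poll the controller for completions.
 */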
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (ida->flags & IDA_INTERRUPTS) {
		if (tsleep((caddr_t)qcb, 0, "idacmd", 5 * hz))
			return (ETIMEDOUT);
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0)
			return (ETIMEDOUT);
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}

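/*
 * Interrupt handler: drain completed commands from the controller,
 * finish the associated QCBs, and start any queued work.
 */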
void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (uintmax_t)completed);
			continue;
		}
		ida_done(ida, qcb);
	}
	ida_start(ida);
}

/*
 * Should dispatch on the command type here; the completion may be for a
 * status command, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	if (qcb->hwqcb->req.error & SOFT_ERROR)
		device_printf(ida->dev, "soft error\n");
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->bio->bio_buf->b_flags |= B_ERROR;
		idad_intr(qcb->bio);
	}

	qcb->state = QCB_FREE;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}