xref: /dragonfly/sys/dev/raid/twe/twe_freebsd.c (revision 7d84b73d)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2003 Paul Saab
4  * Copyright (c) 2003 Vinod Kashyap
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/twe/twe_freebsd.c,v 1.54 2012/11/17 01:52:19 svnexp Exp $
30  */
31 
32 /*
33  * FreeBSD-specific code.
34  */
35 
36 #include <dev/raid/twe/twe_compat.h>
37 #include <dev/raid/twe/twereg.h>
38 #include <dev/raid/twe/tweio.h>
39 #include <dev/raid/twe/twevar.h>
40 #include <dev/raid/twe/twe_tables.h>
41 #include <sys/dtype.h>
42 #include <sys/mplock2.h>
43 #include <sys/thread2.h>
44 
45 #include <vm/vm.h>
46 
47 static devclass_t	twe_devclass;
48 
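/*
 * In TWE_DEBUG kernels, twed_bio_in/twed_bio_out count bios entering
 * twed_strategy() and completing via biodone(); twe_report() at the bottom
 * of this file prints the totals from DDB.
 */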
49 #ifdef TWE_DEBUG
50 static u_int32_t	twed_bio_in;
51 #define TWED_BIO_IN	twed_bio_in++
52 static u_int32_t	twed_bio_out;
53 #define TWED_BIO_OUT	twed_bio_out++
54 #else
55 #define TWED_BIO_IN
56 #define TWED_BIO_OUT
57 #endif
58 
59 static void	twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
60 static void	twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
61 
62 /********************************************************************************
63  ********************************************************************************
64                                                          Control device interface
65  ********************************************************************************
66  ********************************************************************************/
67 
68 static	d_open_t		twe_open;
69 static	d_close_t		twe_close;
70 static	d_ioctl_t		twe_ioctl_wrapper;
71 
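/*
 * These entry points back the /dev/twe%d control node that twe_attach()
 * creates with make_dev() below.
 */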
72 static struct dev_ops twe_ops = {
73 	{ "twe", 0, D_MPSAFE },
74 	.d_open =	twe_open,
75 	.d_close =	twe_close,
76 	.d_ioctl =	twe_ioctl_wrapper,
77 };
78 
79 /********************************************************************************
80  * Accept an open operation on the control device.
81  */
82 static int
83 twe_open(struct dev_open_args *ap)
84 {
85     cdev_t			dev = ap->a_head.a_dev;
86     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
87 
88     TWE_IO_LOCK(sc);
89     if (sc->twe_state & TWE_STATE_DETACHING) {
90 	TWE_IO_UNLOCK(sc);
91 	return (ENXIO);
92     }
93     sc->twe_state |= TWE_STATE_OPEN;
94     TWE_IO_UNLOCK(sc);
95     return(0);
96 }
97 
98 /********************************************************************************
99  * Accept the last close on the control device.
100  */
101 static int
102 twe_close(struct dev_close_args *ap)
103 {
104     cdev_t			dev = ap->a_head.a_dev;
105     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
106 
107     TWE_IO_LOCK(sc);
108     sc->twe_state &= ~TWE_STATE_OPEN;
109     TWE_IO_UNLOCK(sc);
110     return (0);
111 }
112 
113 /********************************************************************************
114  * Handle controller-specific control operations.
115  */
116 static int
117 twe_ioctl_wrapper(struct dev_ioctl_args *ap)
118 {
119     cdev_t dev = ap->a_head.a_dev;
120     u_long cmd = ap->a_cmd;
121     caddr_t addr = ap->a_data;
122     struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
123 
124     return(twe_ioctl(sc, cmd, addr));
125 }
126 
127 /********************************************************************************
128  ********************************************************************************
129                                                              PCI device interface
130  ********************************************************************************
131  ********************************************************************************/
132 
133 static int	twe_probe(device_t dev);
134 static int	twe_attach(device_t dev);
135 static void	twe_free(struct twe_softc *sc);
136 static int	twe_detach(device_t dev);
137 static int	twe_shutdown(device_t dev);
138 static int	twe_suspend(device_t dev);
139 static int	twe_resume(device_t dev);
140 static void	twe_pci_intr(void *arg);
141 static void	twe_intrhook(void *arg);
142 
143 static device_method_t twe_methods[] = {
144     /* Device interface */
145     DEVMETHOD(device_probe,	twe_probe),
146     DEVMETHOD(device_attach,	twe_attach),
147     DEVMETHOD(device_detach,	twe_detach),
148     DEVMETHOD(device_shutdown,	twe_shutdown),
149     DEVMETHOD(device_suspend,	twe_suspend),
150     DEVMETHOD(device_resume,	twe_resume),
151 
152     DEVMETHOD_END
153 };
154 
155 static driver_t twe_pci_driver = {
156 	"twe",
157 	twe_methods,
158 	sizeof(struct twe_softc)
159 };
160 
161 DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, NULL, NULL);
162 
163 /********************************************************************************
164  * Match a 3ware Escalade ATA RAID controller.
165  */
166 static int
167 twe_probe(device_t dev)
168 {
169 
170     debug_called(4);
171 
172     if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
173 	((pci_get_device(dev) == TWE_DEVICE_ID) ||
174 	 (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
175 	device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
176 	return(BUS_PROBE_DEFAULT);
177     }
178     return(ENXIO);
179 }
180 
181 /********************************************************************************
182  * Allocate resources, initialise the controller.
183  */
184 static int
185 twe_attach(device_t dev)
186 {
187     struct twe_softc	*sc;
188     int			rid, error;
189 
190     debug_called(4);
191 
192     /*
193      * Initialise the softc structure.
194      */
195     sc = device_get_softc(dev);
196     sc->twe_dev = dev;
197     lockinit(&sc->twe_io_lock, "twe I/O", 0, LK_CANRECURSE);
198     lockinit(&sc->twe_config_lock, "twe config", 0, LK_CANRECURSE);
199 
200     SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
201 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
202 	OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
203 	"TWE driver version");
204 
205     /*
206      * Force the busmaster enable bit on, in case the BIOS forgot.
207      */
208     pci_enable_busmaster(dev);
209 
210     /*
211      * Allocate the PCI register window.
212      */
213     rid = TWE_IO_CONFIG_REG;
214     if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
215         RF_ACTIVE)) == NULL) {
216 	twe_printf(sc, "can't allocate register window\n");
217 	twe_free(sc);
218 	return(ENXIO);
219     }
220 
221     /*
222      * Allocate the parent bus DMA tag appropriate for PCI.
223      */
224     if (bus_dma_tag_create(NULL, 				/* parent */
225 			   1, 0, 				/* alignment, boundary */
226 			   BUS_SPACE_MAXADDR_32BIT, 		/* lowaddr */
227 			   BUS_SPACE_MAXADDR, 			/* highaddr */
228 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,	/* maxsize, nsegments */
229 			   BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
230 			   0,					/* flags */
231 			   &sc->twe_parent_dmat)) {
232 	twe_printf(sc, "can't allocate parent DMA tag\n");
233 	twe_free(sc);
234 	return(ENOMEM);
235     }
236 
237     /*
238      * Allocate and connect our interrupt.
239      */
240     rid = 0;
241     if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
242         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
243 	twe_printf(sc, "can't allocate interrupt\n");
244 	twe_free(sc);
245 	return(ENXIO);
246     }
247     if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_MPSAFE,
248 			twe_pci_intr, sc, &sc->twe_intr, NULL)) {
249 	twe_printf(sc, "can't set up interrupt\n");
250 	twe_free(sc);
251 	return(ENXIO);
252     }
253 
254     /*
255      * Create DMA tag for mapping commands into controller-addressable space.
256      */
257     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
258 			   1, 0, 			/* alignment, boundary */
259 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
260 			   BUS_SPACE_MAXADDR, 		/* highaddr */
261 			   sizeof(TWE_Command) *
262 			   TWE_Q_LENGTH, 1,		/* maxsize, nsegments */
263 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
264 			   0,				/* flags */
265 			   &sc->twe_cmd_dmat)) {
266 	twe_printf(sc, "can't allocate command DMA tag\n");
267 	twe_free(sc);
268 	return(ENOMEM);
269     }
270     /*
271      * Allocate memory and make it available for DMA.
272      */
273     if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
274 			 BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
275 	twe_printf(sc, "can't allocate command memory\n");
276 	return(ENOMEM);
277     }
278     bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
279 		    sizeof(TWE_Command) * TWE_Q_LENGTH,
280 		    twe_setup_request_dmamap, sc, 0);
281     bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
282 
283     /*
284      * Create DMA tag for mapping objects into controller-addressable space.
285      */
286     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
287 			   1, 0, 			/* alignment, boundary */
288 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
289 			   BUS_SPACE_MAXADDR, 		/* highaddr */
290 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
291 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
292 			   BUS_DMA_ALLOCNOW,		/* flags */
293 			   &sc->twe_buffer_dmat)) {
294 	twe_printf(sc, "can't allocate data buffer DMA tag\n");
295 	twe_free(sc);
296 	return(ENOMEM);
297     }
298 
299     /*
300      * Create DMA tag for mapping immediate requests into controller-addressable space.
301      */
302     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
303 			   1, 0, 			/* alignment, boundary */
304 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
305 			   BUS_SPACE_MAXADDR, 		/* highaddr */
306 			   MAXBSIZE, 1,			/* maxsize, nsegments */
307 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
308 			   0,				/* flags */
309 			   &sc->twe_immediate_dmat)) {
310 	twe_printf(sc, "can't allocate immediate request DMA tag\n");
311 	twe_free(sc);
312 	return(ENOMEM);
313     }
314     /*
315      * Allocate memory for requests which cannot sleep or support continuation.
316      */
317      if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
318 			  BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
319 	twe_printf(sc, "can't allocate memory for immediate requests\n");
320 	return(ENOMEM);
321      }
322 
323     /*
324      * Initialise the controller and driver core.
325      */
326     if ((error = twe_setup(sc))) {
327 	twe_free(sc);
328 	return(error);
329     }
330 
331     /*
332      * Print some information about the controller and configuration.
333      */
334     twe_describe_controller(sc);
335 
336     /*
337      * Create the control device.
338      */
339     sc->twe_dev_t = make_dev(&twe_ops, device_get_unit(sc->twe_dev),
340 			     UID_ROOT, GID_OPERATOR,
341 			     S_IRUSR | S_IWUSR, "twe%d",
342 			     device_get_unit(sc->twe_dev));
343     sc->twe_dev_t->si_drv1 = sc;
344 
345     /*
346      * Schedule ourselves to bring the controller up once interrupts are
347      * available.  This isn't strictly necessary, since we disable
348      * interrupts while probing the controller, but it is more in keeping
349      * with common practice for other disk devices.
350      */
351     sc->twe_ich.ich_func = twe_intrhook;
352     sc->twe_ich.ich_arg = sc;
353     sc->twe_ich.ich_desc = "twe";
354     if (config_intrhook_establish(&sc->twe_ich) != 0) {
355 	twe_printf(sc, "can't establish configuration hook\n");
356 	twe_free(sc);
357 	return(ENXIO);
358     }
359 
360     return(0);
361 }
362 
363 /********************************************************************************
364  * Free all of the resources associated with (sc).
365  *
366  * Should not be called if the controller is active.
367  */
368 static void
369 twe_free(struct twe_softc *sc)
370 {
371     struct twe_request	*tr;
372 
373     debug_called(4);
374 
375     /* throw away any command buffers */
376     while ((tr = twe_dequeue_free(sc)) != NULL)
377 	twe_free_request(tr);
378 
379     if (sc->twe_cmd != NULL) {
380 	bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
381 	bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
382     }
383 
384     if (sc->twe_immediate != NULL) {
385 	bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
386 	bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
387 			sc->twe_immediate_map);
388     }
389 
390     if (sc->twe_immediate_dmat)
391 	bus_dma_tag_destroy(sc->twe_immediate_dmat);
392 
393     /* destroy the data-transfer DMA tag */
394     if (sc->twe_buffer_dmat)
395 	bus_dma_tag_destroy(sc->twe_buffer_dmat);
396 
397     /* disconnect the interrupt handler */
398     if (sc->twe_intr)
399 	bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
400     if (sc->twe_irq != NULL)
401 	bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
402 
403     /* destroy the parent DMA tag */
404     if (sc->twe_parent_dmat)
405 	bus_dma_tag_destroy(sc->twe_parent_dmat);
406 
407     /* release the register window mapping */
408     if (sc->twe_io != NULL)
409 	bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
410 
411     /* destroy control device */
412     if (sc->twe_dev_t != NULL)
413 	destroy_dev(sc->twe_dev_t);
414     dev_ops_remove_minor(&twe_ops, device_get_unit(sc->twe_dev));
415 
416     lockuninit(&sc->twe_config_lock);
417     lockuninit(&sc->twe_io_lock);
418 }
419 
420 /********************************************************************************
421  * Disconnect from the controller completely, in preparation for unload.
422  */
423 static int
424 twe_detach(device_t dev)
425 {
426     struct twe_softc	*sc = device_get_softc(dev);
427 
428     debug_called(4);
429 
430     TWE_IO_LOCK(sc);
431     if (sc->twe_state & TWE_STATE_OPEN) {
432 	TWE_IO_UNLOCK(sc);
433 	return (EBUSY);
434     }
435     sc->twe_state |= TWE_STATE_DETACHING;
436     TWE_IO_UNLOCK(sc);
437 
438     /*
439      * Shut the controller down.
440      */
441     if (twe_shutdown(dev)) {
442 	TWE_IO_LOCK(sc);
443 	sc->twe_state &= ~TWE_STATE_DETACHING;
444 	TWE_IO_UNLOCK(sc);
445 	return (EBUSY);
446     }
447 
448     twe_free(sc);
449 
450     return(0);
451 }
452 
453 /********************************************************************************
454  * Bring the controller down to a dormant state and detach all child devices.
455  *
456  * Note that we can assume that the bioq on the controller is empty, as we won't
457  * allow shutdown if any device is open.
458  */
459 static int
460 twe_shutdown(device_t dev)
461 {
462     struct twe_softc	*sc = device_get_softc(dev);
463     int			i, error = 0;
464 
465     debug_called(4);
466 
467     /*
468      * Delete all our child devices.
469      */
470     TWE_CONFIG_LOCK(sc);
471     for (i = 0; i < TWE_MAX_UNITS; i++) {
472 	if (sc->twe_drive[i].td_disk != 0) {
473 	    if ((error = twe_detach_drive(sc, i)) != 0) {
474 		TWE_CONFIG_UNLOCK(sc);
475 		return (error);
476 	    }
477 	}
478     }
479     TWE_CONFIG_UNLOCK(sc);
480 
481     /*
482      * Bring the controller down.
483      */
484     TWE_IO_LOCK(sc);
485     twe_deinit(sc);
486     TWE_IO_UNLOCK(sc);
487 
488     return(0);
489 }
490 
491 /********************************************************************************
492  * Bring the controller to a quiescent state, ready for system suspend.
493  */
494 static int
495 twe_suspend(device_t dev)
496 {
497     struct twe_softc	*sc = device_get_softc(dev);
498 
499     debug_called(4);
500 
501     TWE_IO_LOCK(sc);
502     sc->twe_state |= TWE_STATE_SUSPEND;
503     TWE_IO_UNLOCK(sc);
504 
505     twe_disable_interrupts(sc);
507 
508     return(0);
509 }
510 
511 /********************************************************************************
512  * Bring the controller back to a state ready for operation.
513  */
514 static int
515 twe_resume(device_t dev)
516 {
517     struct twe_softc	*sc = device_get_softc(dev);
518 
519     debug_called(4);
520 
521     TWE_IO_LOCK(sc);
522     sc->twe_state &= ~TWE_STATE_SUSPEND;
523     twe_enable_interrupts(sc);
524     TWE_IO_UNLOCK(sc);
525 
526     return(0);
527 }
528 
529 /*******************************************************************************
530  * Take an interrupt, or be poked by other code to look for interrupt-worthy
531  * status.
532  */
533 static void
534 twe_pci_intr(void *arg)
535 {
536     struct twe_softc *sc = arg;
537 
538     TWE_IO_LOCK(sc);
539     twe_intr(sc);
540     TWE_IO_UNLOCK(sc);
541 }
542 
543 /********************************************************************************
544  * Delayed-startup hook
545  */
546 static void
547 twe_intrhook(void *arg)
548 {
549     struct twe_softc		*sc = (struct twe_softc *)arg;
550 
551     /* pull ourselves off the intrhook chain */
552     config_intrhook_disestablish(&sc->twe_ich);
553 
554     /* call core startup routine */
555     twe_init(sc);
556 }
557 
558 /********************************************************************************
559  * Given a detected drive, attach it to the bio interface.
560  *
561  * This is called from twe_add_unit.
562  */
563 int
564 twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
565 {
566     char	buf[80];
567     int		error;
568 
569     get_mplock();
570     dr->td_disk = device_add_child(sc->twe_dev, NULL, -1);
571     if (dr->td_disk == NULL) {
572 	rel_mplock();
573 	twe_printf(sc, "Cannot add unit\n");
574 	return (EIO);
575     }
576     device_set_ivars(dr->td_disk, dr);
577 
578     /*
579      * XXX It would make sense to test the online/initialising bits, but they seem to be
580      * always set...
581      */
582     ksprintf(buf, "Unit %d, %s, %s",
583 	    dr->td_twe_unit,
584 	    twe_describe_code(twe_table_unittype, dr->td_type),
585 	    twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
586     device_set_desc_copy(dr->td_disk, buf);
587 
588     error = device_probe_and_attach(dr->td_disk);
589     rel_mplock();
590     if (error != 0) {
591 	twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
592 	return (EIO);
593     }
594     return (0);
595 }
596 
597 /********************************************************************************
598  * Detach the specified unit if it exists.
599  *
600  * This is called from twe_del_unit.
601  */
602 int
603 twe_detach_drive(struct twe_softc *sc, int unit)
604 {
605     int error = 0;
606 
607     TWE_CONFIG_ASSERT_LOCKED(sc);
608     get_mplock();
609     error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
610     rel_mplock();
611     if (error != 0) {
612 	twe_printf(sc, "failed to delete unit %d\n", unit);
613 	return(error);
614     }
615     bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
616     return(error);
617 }
618 
619 /********************************************************************************
620  * Clear a PCI parity error.
621  */
622 void
623 twe_clear_pci_parity_error(struct twe_softc *sc)
624 {
625     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
626     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
627 }
628 
629 /********************************************************************************
630  * Clear a PCI abort.
631  */
632 void
633 twe_clear_pci_abort(struct twe_softc *sc)
634 {
635     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
636     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
637 }
638 
639 /********************************************************************************
640  ********************************************************************************
641                                                                       Disk device
642  ********************************************************************************
643  ********************************************************************************/
644 
645 /*
646  * Disk device bus interface
647  */
648 static int twed_probe(device_t dev);
649 static int twed_attach(device_t dev);
650 static int twed_detach(device_t dev);
651 
652 static device_method_t twed_methods[] = {
653     DEVMETHOD(device_probe,	twed_probe),
654     DEVMETHOD(device_attach,	twed_attach),
655     DEVMETHOD(device_detach,	twed_detach),
656     DEVMETHOD_END
657 };
658 
659 static driver_t twed_driver = {
660     "twed",
661     twed_methods,
662     sizeof(struct twed_softc)
663 };
664 
665 static devclass_t	twed_devclass;
666 DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, NULL, NULL);
667 
668 /*
669  * Disk device control interface.
670  */
671 static	d_open_t	twed_open;
672 static	d_close_t	twed_close;
673 static	d_strategy_t	twed_strategy;
674 static	d_dump_t	twed_dump;
675 
676 static struct dev_ops twed_ops = {
677 	{ "twed", 0, D_DISK | D_MPSAFE},
678 	.d_open =	twed_open,
679 	.d_close =	twed_close,
680 	.d_read =	physread,
681 	.d_write =	physwrite,
682 	.d_strategy =	twed_strategy,
683 	.d_dump =	twed_dump,
684 };
685 
686 /********************************************************************************
687  * Handle open from generic layer.
688  *
689  * Note that this is typically only called by the diskslice code, and not
690  * for opens on subdevices (eg. slices, partitions).
691  */
692 static int
693 twed_open(struct dev_open_args *ap)
694 {
695     cdev_t dev = ap->a_head.a_dev;
696     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
697 
698     debug_called(4);
699 
700     if (sc == NULL)
701 	return (ENXIO);
702 
703     /* check that the controller is up and running */
704     if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
705 	return(ENXIO);
706 
707     sc->twed_flags |= TWED_OPEN;
708     return (0);
709 }
710 
711 /********************************************************************************
712  * Handle last close of the disk device.
713  */
714 static int
715 twed_close(struct dev_close_args *ap)
716 {
717     cdev_t dev = ap->a_head.a_dev;
718     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
719 
720     debug_called(4);
721 
722     if (sc == NULL)
723 	return (ENXIO);
724 
725     sc->twed_flags &= ~TWED_OPEN;
726     return (0);
727 }
728 
729 /********************************************************************************
730  * Handle an I/O request.
731  */
732 static int
733 twed_strategy(struct dev_strategy_args *ap)
734 {
735     cdev_t dev = ap->a_head.a_dev;
736     struct bio *bio = ap->a_bio;
737     struct twed_softc *sc = dev->si_drv1;
738     struct buf *bp = bio->bio_buf;
739 
740     bio->bio_driver_info = sc;
741 
742     debug_called(4);
743 
744     TWED_BIO_IN;
745 
746     /* bogus disk? */
747     if (sc == NULL || sc->twed_drive->td_disk == NULL) {
748 	bp->b_error = EINVAL;
749 	bp->b_flags |= B_ERROR;
750 	kprintf("twe: bio for invalid disk!\n");
751 	biodone(bio);
752 	TWED_BIO_OUT;
753 	return(0);
754     }
755 
756     /* perform accounting */
757     devstat_start_transaction(&sc->twed_stats);
758 
759     /* queue the bio on the controller */
760     TWE_IO_LOCK(sc->twed_controller);
761     twe_enqueue_bio(sc->twed_controller, bio);
762 
763     /* poke the controller to start I/O */
764     twe_startio(sc->twed_controller);
765     TWE_IO_UNLOCK(sc->twed_controller);
766     return(0);
767 }
768 
769 /********************************************************************************
770  * System crashdump support
771  */
772 static int
773 twed_dump(struct dev_dump_args *ap)
774 {
775     cdev_t dev = ap->a_head.a_dev;
776     size_t length = ap->a_length;
777     off_t offset = ap->a_offset;
778     void *virtual = ap->a_virtual;
779     struct twed_softc	*twed_sc;
780     struct twe_softc	*twe_sc;
781     int			error;
782 
783     twed_sc = dev->si_drv1;
784     if (twed_sc == NULL)
785 	return(ENXIO);
786     twe_sc  = (struct twe_softc *)twed_sc->twed_controller;
787 
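    /*
     * The dump interface passes byte offsets and lengths; twe_dump_blocks()
     * works in TWE_BLOCK_SIZE units, hence the divisions below.
     */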
788     if (length > 0) {
789 	if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
790 	    return(error);
791     }
792     return(0);
793 }
794 
795 /********************************************************************************
796  * Handle completion of an I/O request.
797  */
798 void
799 twed_intr(struct bio *bio)
800 {
801     struct buf *bp = bio->bio_buf;
802     struct twed_softc *sc = bio->bio_driver_info;
803 
804     debug_called(4);
805 
806     /* if no error, transfer completed */
807     if (!(bp->b_flags & B_ERROR))
808 	bp->b_resid = 0;
809     devstat_end_transaction_buf(&sc->twed_stats, bp);
810     biodone(bio);
811     TWED_BIO_OUT;
812 }
813 
814 /********************************************************************************
815  * Default probe stub.
816  */
817 static int
818 twed_probe(device_t dev)
819 {
820     return (0);
821 }
822 
823 /********************************************************************************
824  * Attach a unit to the controller.
825  */
826 static int
827 twed_attach(device_t dev)
828 {
829     struct twed_softc	*sc;
830     struct disk_info	info;
831     device_t		parent;
832     cdev_t		dsk;
833 
834     debug_called(4);
835 
836     /* initialise our softc */
837     sc = device_get_softc(dev);
838     parent = device_get_parent(dev);
839     sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
840     sc->twed_drive = device_get_ivars(dev);
841     sc->twed_dev = dev;
842 
843     /* report the drive */
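    /*
     * td_size counts TWE_BLOCK_SIZE blocks, so dividing by the number of
     * blocks per megabyte (1024 * 1024 / TWE_BLOCK_SIZE) gives the size in MB.
     */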
844     twed_printf(sc, "%uMB (%u sectors)\n",
845 		sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
846 		sc->twed_drive->td_size);
847 
848     /* attach a generic disk device to ourselves */
849 
850     sc->twed_drive->td_sys_unit = device_get_unit(dev);
851 
852     devstat_add_entry(&sc->twed_stats, "twed", sc->twed_drive->td_sys_unit,
853 			TWE_BLOCK_SIZE,
854 			DEVSTAT_NO_ORDERED_TAGS,
855 			DEVSTAT_TYPE_STORARRAY | DEVSTAT_TYPE_IF_OTHER,
856 			DEVSTAT_PRIORITY_ARRAY);
857 
858     dsk = disk_create(sc->twed_drive->td_sys_unit, &sc->twed_disk, &twed_ops);
859     dsk->si_drv1 = sc;
860     sc->twed_dev_t = dsk;
861 
862     /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */
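    /* hold back one s/g entry: a transfer that is not page-aligned can span
     * one more page than its byte count alone would suggest */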
863     dsk->si_iosize_max = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
864 
865     /*
866      * Set disk info, as it appears that all needed data is available already.
867      * Setting the disk info will also cause the probing to start.
868      */
869     bzero(&info, sizeof(info));
870     info.d_media_blksize    = TWE_BLOCK_SIZE;	/* mandatory */
871     info.d_media_blocks	    = sc->twed_drive->td_size;
872 
873     info.d_type		= DTYPE_ESDI;		/* optional */
874     info.d_secpertrack	= sc->twed_drive->td_sectors;
875     info.d_nheads	= sc->twed_drive->td_heads;
876     info.d_ncylinders	= sc->twed_drive->td_cylinders;
877     info.d_secpercyl	= sc->twed_drive->td_sectors * sc->twed_drive->td_heads;
878 
879     disk_setdiskinfo(&sc->twed_disk, &info);
880 
881     return (0);
882 }
883 
884 /********************************************************************************
885  * Disconnect ourselves from the system.
886  */
887 static int
888 twed_detach(device_t dev)
889 {
890     struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
891 
892     debug_called(4);
893 
894     if (sc->twed_flags & TWED_OPEN)
895 	return(EBUSY);
896 
897     devstat_remove_entry(&sc->twed_stats);
898     disk_destroy(&sc->twed_disk);
899 
900     return(0);
901 }
902 
903 /********************************************************************************
904  ********************************************************************************
905                                                                              Misc
906  ********************************************************************************
907  ********************************************************************************/
908 
909 /********************************************************************************
910  * Allocate a command buffer
911  */
912 static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
913 
914 struct twe_request *
915 twe_allocate_request(struct twe_softc *sc, int tag)
916 {
917     struct twe_request	*tr;
918     int aligned_size;
919 
920     /*
921      * TWE requires requests to be 512-byte aligned.  Depend on malloc()
922      * guaranteeing alignment for power-of-2 requests.  Note that the old
923      * (FreeBSD-4.x) malloc code aligned all requests, but the new slab
924      * allocator only guarantees same-size alignment for power-of-2 requests.
925      */
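    /*
     * Adding TWE_ALIGNMASK and masking it off rounds the structure size up to
     * the next multiple of the required alignment, e.g. 0x2a4 -> 0x400 for a
     * 512-byte alignment.
     */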
926     aligned_size = (sizeof(struct twe_request) + TWE_ALIGNMASK) &
927 	~TWE_ALIGNMASK;
928     tr = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT | M_ZERO);
929     tr->tr_sc = sc;
930     tr->tr_tag = tag;
931     if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
932 	twe_free_request(tr);
933 	twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
934 	return(NULL);
935     }
936     return(tr);
937 }
938 
939 /********************************************************************************
940  * Permanently discard a command buffer.
941  */
942 void
943 twe_free_request(struct twe_request *tr)
944 {
945     struct twe_softc	*sc = tr->tr_sc;
946 
947     debug_called(4);
948 
949     bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
950     kfree(tr, TWE_MALLOC_CLASS);
951 }
952 
953 /********************************************************************************
954  * Map/unmap (tr)'s command and data in the controller's addressable space.
955  *
956  * These routines ensure that the data which the controller is going to try to
957  * access is actually visible to the controller, in a machine-independent
958  * fashion.  Due to a hardware limitation, I/O buffers must be 512-byte aligned
959  * and we take care of that here as well.
960  */
961 static void
962 twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
963 {
964     int i;
965 
966     for (i = 0; i < nsegments; i++) {
967 	sgl[i].address = segs[i].ds_addr;
968 	sgl[i].length = segs[i].ds_len;
969     }
970     for (; i < max_sgl; i++) {				/* XXX necessary? */
971 	sgl[i].address = 0;
972 	sgl[i].length = 0;
973     }
974 }
975 
976 static void
977 twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
978 {
979     struct twe_request	*tr = (struct twe_request *)arg;
980     struct twe_softc	*sc = tr->tr_sc;
981     TWE_Command		*cmd = TWE_FIND_COMMAND(tr);
982 
983     debug_called(4);
984 
985     if (tr->tr_flags & TWE_CMD_MAPPED)
986 	panic("already mapped command");
987 
988     tr->tr_flags |= TWE_CMD_MAPPED;
989 
990     if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
991 	sc->twe_state &= ~TWE_STATE_FRZN;
992     /* save base of first segment in command (applicable if there is only one segment) */
993     tr->tr_dataphys = segs[0].ds_addr;
994 
995     /* correct command size for s/g list size */
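    /* (the size field counts 32-bit words, and each s/g entry is an
     * address/length pair, i.e. two words) */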
996     cmd->generic.size += 2 * nsegments;
997 
998     /*
999      * Parameter and I/O commands keep the scatter/gather list in different
1000      * places, so determine which sort of command this actually is before
1001      * populating it.
1002      */
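    /* sgl_offset is the offset, in 32-bit words from the start of the
     * command, at which the controller expects to find the s/g list */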
1003     switch(cmd->generic.opcode) {
1004     case TWE_OP_GET_PARAM:
1005     case TWE_OP_SET_PARAM:
1006 	cmd->generic.sgl_offset = 2;
1007 	twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1008 	break;
1009     case TWE_OP_READ:
1010     case TWE_OP_WRITE:
1011 	cmd->generic.sgl_offset = 3;
1012 	twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1013 	break;
1014     case TWE_OP_ATA_PASSTHROUGH:
1015 	cmd->generic.sgl_offset = 5;
1016 	twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1017 	break;
1018     default:
1019 	/*
1020 	 * Fall back to what the linux driver does.
1021 	 * Do this because the API may send an opcode
1022 	 * the driver knows nothing about and this will
1023 	 * at least stop PCIABRT's from hosing us.
1024 	 */
1025 	switch (cmd->generic.sgl_offset) {
1026 	case 2:
1027 	    twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1028 	    break;
1029 	case 3:
1030 	    twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1031 	    break;
1032 	case 5:
1033 	    twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1034 	    break;
1035 	}
1036     }
1037 
1038     if (tr->tr_flags & TWE_CMD_DATAIN) {
1039 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1040 	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1041 			    BUS_DMASYNC_PREREAD);
1042 	} else {
1043 	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1044 			    BUS_DMASYNC_PREREAD);
1045 	}
1046     }
1047 
1048     if (tr->tr_flags & TWE_CMD_DATAOUT) {
1049 	/*
1050 	 * if we're using an alignment buffer and we're writing data,
1051 	 * copy the real data out
1052 	 */
1053 	if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1054 	    bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);
1055 
1056 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1057 	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1058 			    BUS_DMASYNC_PREWRITE);
1059 	} else {
1060 	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1061 			    BUS_DMASYNC_PREWRITE);
1062 	}
1063     }
1064 
1065     if (twe_start(tr) == EBUSY) {
1066 	tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
1067 	twe_requeue_ready(tr);
1068     }
1069 }
1070 
1071 static void
1072 twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1073 {
1074     struct twe_softc	*sc = (struct twe_softc *)arg;
1075 
1076     debug_called(4);
1077 
1078     /* command can't cross a page boundary */
1079     sc->twe_cmdphys = segs[0].ds_addr;
1080 }
1081 
1082 int
1083 twe_map_request(struct twe_request *tr)
1084 {
1085     struct twe_softc	*sc = tr->tr_sc;
1086     int			error = 0;
1087 
1088     debug_called(4);
1089 
1090     twe_lockassert(&sc->twe_io_lock);
1091     if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
1092 	twe_requeue_ready(tr);
1093 	return (EBUSY);
1094     }
1095 
1096     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
1097 
1098     /*
1099      * If the command involves data, map that too.
1100      */
1101     if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
1102 
1103 	/*
1104 	 * Data must be 512-byte aligned; allocate a fixup buffer if it's not.
1105 	 *
1106 	 * DragonFly's malloc only guarantees alignment for requests which
1107 	 * are power-of-2 sized.
1108 	 */
1109 	if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
1110 	    int aligned_size;
1111 
1112 	    tr->tr_realdata = tr->tr_data;	/* save pointer to 'real' data */
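	    /* grow to the smallest power of two >= tr_length, so kmalloc's
	     * same-size alignment yields at least TWE_ALIGNMENT alignment */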
1113 	    aligned_size = TWE_ALIGNMENT;
1114 	    while (aligned_size < tr->tr_length)
1115 		aligned_size <<= 1;
1116 	    tr->tr_flags |= TWE_CMD_ALIGNBUF;
1117 	    tr->tr_data = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT);
1118 	    if (tr->tr_data == NULL) {
1119 		twe_printf(sc, "%s: malloc failed\n", __func__);
1120 		tr->tr_data = tr->tr_realdata; /* restore original data pointer */
1121 		return(ENOMEM);
1122 	    }
1123 	}
1124 
1125 	/*
1126 	 * Map the data buffer into bus space and build the s/g list.
1127 	 */
1128 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1129 	    error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
1130 			    tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
1131 	} else {
1132 	    error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length,
1133 				    twe_setup_data_dmamap, tr, 0);
1134 	}
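	/*
	 * EINPROGRESS means the load was deferred; twe_setup_data_dmamap()
	 * will clear TWE_STATE_FRZN when it eventually runs, so freeze
	 * further mapping and report success for now.
	 */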
1135 	if (error == EINPROGRESS) {
1136 	    tr->tr_flags |= TWE_CMD_IN_PROGRESS;
1137 	    sc->twe_state |= TWE_STATE_FRZN;
1138 	    error = 0;
1139 	}
1140     } else
1141 	if ((error = twe_start(tr)) == EBUSY) {
1142 	    sc->twe_state |= TWE_STATE_CTLR_BUSY;
1143 	    twe_requeue_ready(tr);
1144 	}
1145 
1146     return(error);
1147 }
1148 
1149 void
1150 twe_unmap_request(struct twe_request *tr)
1151 {
1152     struct twe_softc	*sc = tr->tr_sc;
1153 
1154     debug_called(4);
1155 
1156     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
1157 
1158     /*
1159      * If the command involved data, unmap that too.
1160      */
1161     if (tr->tr_data != NULL) {
1162 	if (tr->tr_flags & TWE_CMD_DATAIN) {
1163 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1164 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1165 				BUS_DMASYNC_POSTREAD);
1166 	    } else {
1167 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1168 				BUS_DMASYNC_POSTREAD);
1169 	    }
1170 
1171 	    /* if we're using an alignment buffer, and we're reading data, copy the real data in */
1172 	    if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1173 		bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
1174 	}
1175 	if (tr->tr_flags & TWE_CMD_DATAOUT) {
1176 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1177 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1178 				BUS_DMASYNC_POSTWRITE);
1179 	    } else {
1180 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1181 				BUS_DMASYNC_POSTWRITE);
1182 	    }
1183 	}
1184 
1185 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1186 	    bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
1187 	} else {
1188 	    bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap);
1189 	}
1190     }
1191 
1192     /* free alignment buffer if it was used */
1193     if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
1194 	kfree(tr->tr_data, TWE_MALLOC_CLASS);
1195 	tr->tr_data = tr->tr_realdata;		/* restore 'real' data pointer */
1196     }
1197 }
1198 
1199 #ifdef TWE_DEBUG
1200 void twe_report(void);
1201 /********************************************************************************
1202  * Print current controller status, call from DDB.
1203  */
1204 void
1205 twe_report(void)
1206 {
1207     struct twe_softc	*sc;
1208     int			i;
1209 
1210     for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++)
1211 	twe_print_controller(sc);
1212     kprintf("twed: total bio count in %u  out %u\n", twed_bio_in, twed_bio_out);
1213 }
1214 #endif
1215