xref: /dragonfly/sys/dev/raid/twe/twe_freebsd.c (revision e6d22e9b)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2003 Paul Saab
4  * Copyright (c) 2003 Vinod Kashyap
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/twe/twe_freebsd.c,v 1.54 2012/11/17 01:52:19 svnexp Exp $
30  */
31 
32 /*
33  * FreeBSD-specific code.
34  */
35 
36 #include <dev/raid/twe/twe_compat.h>
37 #include <dev/raid/twe/twereg.h>
38 #include <dev/raid/twe/tweio.h>
39 #include <dev/raid/twe/twevar.h>
40 #include <dev/raid/twe/twe_tables.h>
41 #include <sys/dtype.h>
42 #include <sys/mplock2.h>
43 #include <sys/thread2.h>
44 
45 #include <vm/vm.h>
46 
/* devclass for the controller driver; filled in by DRIVER_MODULE below */
static devclass_t	twe_devclass;

#ifdef TWE_DEBUG
/* debug-only counters tracking bios entering/leaving the disk strategy path */
static u_int32_t	twed_bio_in;
#define TWED_BIO_IN	twed_bio_in++
static u_int32_t	twed_bio_out;
#define TWED_BIO_OUT	twed_bio_out++
#else
/* compiled out when TWE_DEBUG is not defined */
#define TWED_BIO_IN
#define TWED_BIO_OUT
#endif

/* busdma load callbacks (bodies later in this file) */
static void	twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
61 
62 /********************************************************************************
63  ********************************************************************************
64                                                          Control device interface
65  ********************************************************************************
66  ********************************************************************************/
67 
68 static	d_open_t		twe_open;
69 static	d_close_t		twe_close;
70 static	d_ioctl_t		twe_ioctl_wrapper;
71 
72 static struct dev_ops twe_ops = {
73 	{ "twe", 0, D_MPSAFE },
74 	.d_open =	twe_open,
75 	.d_close =	twe_close,
76 	.d_ioctl =	twe_ioctl_wrapper,
77 };
78 
79 /********************************************************************************
80  * Accept an open operation on the control device.
81  */
82 static int
83 twe_open(struct dev_open_args *ap)
84 {
85     cdev_t			dev = ap->a_head.a_dev;
86     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
87 
88     TWE_IO_LOCK(sc);
89     if (sc->twe_state & TWE_STATE_DETACHING) {
90 	TWE_IO_UNLOCK(sc);
91 	return (ENXIO);
92     }
93     sc->twe_state |= TWE_STATE_OPEN;
94     TWE_IO_UNLOCK(sc);
95     return(0);
96 }
97 
98 /********************************************************************************
99  * Accept the last close on the control device.
100  */
101 static int
102 twe_close(struct dev_close_args *ap)
103 {
104     cdev_t			dev = ap->a_head.a_dev;
105     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
106 
107     TWE_IO_LOCK(sc);
108     sc->twe_state &= ~TWE_STATE_OPEN;
109     TWE_IO_UNLOCK(sc);
110     return (0);
111 }
112 
113 /********************************************************************************
114  * Handle controller-specific control operations.
115  */
116 static int
117 twe_ioctl_wrapper(struct dev_ioctl_args *ap)
118 {
119     cdev_t dev = ap->a_head.a_dev;
120     u_long cmd = ap->a_cmd;
121     caddr_t addr = ap->a_data;
122     struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
123 
124     return(twe_ioctl(sc, cmd, addr));
125 }
126 
127 /********************************************************************************
128  ********************************************************************************
129                                                              PCI device interface
130  ********************************************************************************
131  ********************************************************************************/
132 
133 static int	twe_probe(device_t dev);
134 static int	twe_attach(device_t dev);
135 static void	twe_free(struct twe_softc *sc);
136 static int	twe_detach(device_t dev);
137 static int	twe_shutdown(device_t dev);
138 static int	twe_suspend(device_t dev);
139 static int	twe_resume(device_t dev);
140 static void	twe_pci_intr(void *arg);
141 static void	twe_intrhook(void *arg);
142 
143 static device_method_t twe_methods[] = {
144     /* Device interface */
145     DEVMETHOD(device_probe,	twe_probe),
146     DEVMETHOD(device_attach,	twe_attach),
147     DEVMETHOD(device_detach,	twe_detach),
148     DEVMETHOD(device_shutdown,	twe_shutdown),
149     DEVMETHOD(device_suspend,	twe_suspend),
150     DEVMETHOD(device_resume,	twe_resume),
151 
152     DEVMETHOD_END
153 };
154 
155 static driver_t twe_pci_driver = {
156 	"twe",
157 	twe_methods,
158 	sizeof(struct twe_softc)
159 };
160 
161 DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, NULL, NULL);
162 
163 /********************************************************************************
164  * Match a 3ware Escalade ATA RAID controller.
165  */
166 static int
167 twe_probe(device_t dev)
168 {
169 
170     debug_called(4);
171 
172     if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
173 	((pci_get_device(dev) == TWE_DEVICE_ID) ||
174 	 (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
175 	device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
176 	return(BUS_PROBE_DEFAULT);
177     }
178     return(ENXIO);
179 }
180 
181 /********************************************************************************
182  * Allocate resources, initialise the controller.
183  */
184 static int
185 twe_attach(device_t dev)
186 {
187     struct twe_softc	*sc;
188     int			rid, error;
189 
190     debug_called(4);
191 
192     /*
193      * Initialise the softc structure.
194      */
195     sc = device_get_softc(dev);
196     sc->twe_dev = dev;
197     lockinit(&sc->twe_io_lock, "twe I/O", 0, LK_CANRECURSE);
198     lockinit(&sc->twe_config_lock, "twe config", 0, LK_CANRECURSE);
199 
200     SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
201 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
202 	OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
203 	"TWE driver version");
204 
205     /*
206      * Force the busmaster enable bit on, in case the BIOS forgot.
207      */
208     pci_enable_busmaster(dev);
209 
210     /*
211      * Allocate the PCI register window.
212      */
213     rid = TWE_IO_CONFIG_REG;
214     if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
215         RF_ACTIVE)) == NULL) {
216 	twe_printf(sc, "can't allocate register window\n");
217 	twe_free(sc);
218 	return(ENXIO);
219     }
220 
221     /*
222      * Allocate the parent bus DMA tag appropriate for PCI.
223      */
224     if (bus_dma_tag_create(NULL, 				/* parent */
225 			   1, 0, 				/* alignment, boundary */
226 			   BUS_SPACE_MAXADDR_32BIT, 		/* lowaddr */
227 			   BUS_SPACE_MAXADDR, 			/* highaddr */
228 			   NULL, NULL, 				/* filter, filterarg */
229 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,	/* maxsize, nsegments */
230 			   BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
231 			   0,					/* flags */
232 			   &sc->twe_parent_dmat)) {
233 	twe_printf(sc, "can't allocate parent DMA tag\n");
234 	twe_free(sc);
235 	return(ENOMEM);
236     }
237 
238     /*
239      * Allocate and connect our interrupt.
240      */
241     rid = 0;
242     if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
243         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
244 	twe_printf(sc, "can't allocate interrupt\n");
245 	twe_free(sc);
246 	return(ENXIO);
247     }
248     if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_MPSAFE,
249 			twe_pci_intr, sc, &sc->twe_intr, NULL)) {
250 	twe_printf(sc, "can't set up interrupt\n");
251 	twe_free(sc);
252 	return(ENXIO);
253     }
254 
255     /*
256      * Create DMA tag for mapping command's into controller-addressable space.
257      */
258     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
259 			   1, 0, 			/* alignment, boundary */
260 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
261 			   BUS_SPACE_MAXADDR, 		/* highaddr */
262 			   NULL, NULL, 			/* filter, filterarg */
263 			   sizeof(TWE_Command) *
264 			   TWE_Q_LENGTH, 1,		/* maxsize, nsegments */
265 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
266 			   0,				/* flags */
267 			   &sc->twe_cmd_dmat)) {
268 	twe_printf(sc, "can't allocate data buffer DMA tag\n");
269 	twe_free(sc);
270 	return(ENOMEM);
271     }
272     /*
273      * Allocate memory and make it available for DMA.
274      */
275     if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
276 			 BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
277 	twe_printf(sc, "can't allocate command memory\n");
278 	return(ENOMEM);
279     }
280     bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
281 		    sizeof(TWE_Command) * TWE_Q_LENGTH,
282 		    twe_setup_request_dmamap, sc, 0);
283     bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
284 
285     /*
286      * Create DMA tag for mapping objects into controller-addressable space.
287      */
288     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
289 			   1, 0, 			/* alignment, boundary */
290 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
291 			   BUS_SPACE_MAXADDR, 		/* highaddr */
292 			   NULL, NULL, 			/* filter, filterarg */
293 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
294 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
295 			   BUS_DMA_ALLOCNOW,		/* flags */
296 			   &sc->twe_buffer_dmat)) {
297 	twe_printf(sc, "can't allocate data buffer DMA tag\n");
298 	twe_free(sc);
299 	return(ENOMEM);
300     }
301 
302     /*
303      * Create DMA tag for mapping objects into controller-addressable space.
304      */
305     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
306 			   1, 0, 			/* alignment, boundary */
307 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
308 			   BUS_SPACE_MAXADDR, 		/* highaddr */
309 			   NULL, NULL, 			/* filter, filterarg */
310 			   MAXBSIZE, 1,			/* maxsize, nsegments */
311 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
312 			   0,				/* flags */
313 			   &sc->twe_immediate_dmat)) {
314 	twe_printf(sc, "can't allocate data buffer DMA tag\n");
315 	twe_free(sc);
316 	return(ENOMEM);
317     }
318     /*
319      * Allocate memory for requests which cannot sleep or support continuation.
320      */
321      if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
322 			  BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
323 	twe_printf(sc, "can't allocate memory for immediate requests\n");
324 	return(ENOMEM);
325      }
326 
327     /*
328      * Initialise the controller and driver core.
329      */
330     if ((error = twe_setup(sc))) {
331 	twe_free(sc);
332 	return(error);
333     }
334 
335     /*
336      * Print some information about the controller and configuration.
337      */
338     twe_describe_controller(sc);
339 
340     /*
341      * Create the control device.
342      */
343     sc->twe_dev_t = make_dev(&twe_ops, device_get_unit(sc->twe_dev),
344 			     UID_ROOT, GID_OPERATOR,
345 			     S_IRUSR | S_IWUSR, "twe%d",
346 			     device_get_unit(sc->twe_dev));
347     sc->twe_dev_t->si_drv1 = sc;
348 
349     /*
350      * Schedule ourselves to bring the controller up once interrupts are
351      * available.  This isn't strictly necessary, since we disable
352      * interrupts while probing the controller, but it is more in keeping
353      * with common practice for other disk devices.
354      */
355     sc->twe_ich.ich_func = twe_intrhook;
356     sc->twe_ich.ich_arg = sc;
357     sc->twe_ich.ich_desc = "twe";
358     if (config_intrhook_establish(&sc->twe_ich) != 0) {
359 	twe_printf(sc, "can't establish configuration hook\n");
360 	twe_free(sc);
361 	return(ENXIO);
362     }
363 
364     return(0);
365 }
366 
367 /********************************************************************************
368  * Free all of the resources associated with (sc).
369  *
370  * Should not be called if the controller is active.
371  */
372 static void
373 twe_free(struct twe_softc *sc)
374 {
375     struct twe_request	*tr;
376 
377     debug_called(4);
378 
379     /* throw away any command buffers */
380     while ((tr = twe_dequeue_free(sc)) != NULL)
381 	twe_free_request(tr);
382 
383     if (sc->twe_cmd != NULL) {
384 	bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
385 	bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
386     }
387 
388     if (sc->twe_immediate != NULL) {
389 	bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
390 	bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
391 			sc->twe_immediate_map);
392     }
393 
394     if (sc->twe_immediate_dmat)
395 	bus_dma_tag_destroy(sc->twe_immediate_dmat);
396 
397     /* destroy the data-transfer DMA tag */
398     if (sc->twe_buffer_dmat)
399 	bus_dma_tag_destroy(sc->twe_buffer_dmat);
400 
401     /* disconnect the interrupt handler */
402     if (sc->twe_intr)
403 	bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
404     if (sc->twe_irq != NULL)
405 	bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
406 
407     /* destroy the parent DMA tag */
408     if (sc->twe_parent_dmat)
409 	bus_dma_tag_destroy(sc->twe_parent_dmat);
410 
411     /* release the register window mapping */
412     if (sc->twe_io != NULL)
413 	bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
414 
415     /* destroy control device */
416     if (sc->twe_dev_t != NULL)
417 	destroy_dev(sc->twe_dev_t);
418     dev_ops_remove_minor(&twe_ops, device_get_unit(sc->twe_dev));
419 
420     lockuninit(&sc->twe_config_lock);
421     lockuninit(&sc->twe_io_lock);
422 }
423 
424 /********************************************************************************
425  * Disconnect from the controller completely, in preparation for unload.
426  */
427 static int
428 twe_detach(device_t dev)
429 {
430     struct twe_softc	*sc = device_get_softc(dev);
431 
432     debug_called(4);
433 
434     TWE_IO_LOCK(sc);
435     if (sc->twe_state & TWE_STATE_OPEN) {
436 	TWE_IO_UNLOCK(sc);
437 	return (EBUSY);
438     }
439     sc->twe_state |= TWE_STATE_DETACHING;
440     TWE_IO_UNLOCK(sc);
441 
442     /*
443      * Shut the controller down.
444      */
445     if (twe_shutdown(dev)) {
446 	TWE_IO_LOCK(sc);
447 	sc->twe_state &= ~TWE_STATE_DETACHING;
448 	TWE_IO_UNLOCK(sc);
449 	return (EBUSY);
450     }
451 
452     twe_free(sc);
453 
454     return(0);
455 }
456 
457 /********************************************************************************
458  * Bring the controller down to a dormant state and detach all child devices.
459  *
460  * Note that we can assume that the bioq on the controller is empty, as we won't
461  * allow shutdown if any device is open.
462  */
463 static int
464 twe_shutdown(device_t dev)
465 {
466     struct twe_softc	*sc = device_get_softc(dev);
467     int			i, error = 0;
468 
469     debug_called(4);
470 
471     /*
472      * Delete all our child devices.
473      */
474     TWE_CONFIG_LOCK(sc);
475     for (i = 0; i < TWE_MAX_UNITS; i++) {
476 	if (sc->twe_drive[i].td_disk != 0) {
477 	    if ((error = twe_detach_drive(sc, i)) != 0) {
478 		TWE_CONFIG_UNLOCK(sc);
479 		return (error);
480 	    }
481 	}
482     }
483     TWE_CONFIG_UNLOCK(sc);
484 
485     /*
486      * Bring the controller down.
487      */
488     TWE_IO_LOCK(sc);
489     twe_deinit(sc);
490     TWE_IO_UNLOCK(sc);
491 
492     return(0);
493 }
494 
495 /********************************************************************************
496  * Bring the controller to a quiescent state, ready for system suspend.
497  */
498 static int
499 twe_suspend(device_t dev)
500 {
501     struct twe_softc	*sc = device_get_softc(dev);
502 
503     debug_called(4);
504 
505     TWE_IO_LOCK(sc);
506     sc->twe_state |= TWE_STATE_SUSPEND;
507     TWE_IO_UNLOCK(sc);
508 
509     twe_disable_interrupts(sc);
510     crit_exit();
511 
512     return(0);
513 }
514 
515 /********************************************************************************
516  * Bring the controller back to a state ready for operation.
517  */
518 static int
519 twe_resume(device_t dev)
520 {
521     struct twe_softc	*sc = device_get_softc(dev);
522 
523     debug_called(4);
524 
525     TWE_IO_LOCK(sc);
526     sc->twe_state &= ~TWE_STATE_SUSPEND;
527     twe_enable_interrupts(sc);
528     TWE_IO_UNLOCK(sc);
529 
530     return(0);
531 }
532 
533 /*******************************************************************************
534  * Take an interrupt, or be poked by other code to look for interrupt-worthy
535  * status.
536  */
537 static void
538 twe_pci_intr(void *arg)
539 {
540     struct twe_softc *sc = arg;
541 
542     TWE_IO_LOCK(sc);
543     twe_intr(sc);
544     TWE_IO_UNLOCK(sc);
545 }
546 
547 /********************************************************************************
548  * Delayed-startup hook
549  */
550 static void
551 twe_intrhook(void *arg)
552 {
553     struct twe_softc		*sc = (struct twe_softc *)arg;
554 
555     /* pull ourselves off the intrhook chain */
556     config_intrhook_disestablish(&sc->twe_ich);
557 
558     /* call core startup routine */
559     twe_init(sc);
560 }
561 
562 /********************************************************************************
563  * Given a detected drive, attach it to the bio interface.
564  *
565  * This is called from twe_add_unit.
566  */
567 int
568 twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
569 {
570     char	buf[80];
571     int		error;
572 
573     get_mplock();
574     dr->td_disk =  device_add_child(sc->twe_dev, NULL, -1);
575     if (dr->td_disk == NULL) {
576 	rel_mplock();
577 	twe_printf(sc, "Cannot add unit\n");
578 	return (EIO);
579     }
580     device_set_ivars(dr->td_disk, dr);
581 
582     /*
583      * XXX It would make sense to test the online/initialising bits, but they seem to be
584      * always set...
585      */
586     ksprintf(buf, "Unit %d, %s, %s",
587 	    dr->td_twe_unit,
588 	    twe_describe_code(twe_table_unittype, dr->td_type),
589 	    twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
590     device_set_desc_copy(dr->td_disk, buf);
591 
592     error = device_probe_and_attach(dr->td_disk);
593     rel_mplock();
594     if (error != 0) {
595 	twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
596 	return (EIO);
597     }
598     return (0);
599 }
600 
601 /********************************************************************************
602  * Detach the specified unit if it exsists
603  *
604  * This is called from twe_del_unit.
605  */
606 int
607 twe_detach_drive(struct twe_softc *sc, int unit)
608 {
609     int error = 0;
610 
611     TWE_CONFIG_ASSERT_LOCKED(sc);
612     get_mplock();
613     error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
614     rel_mplock();
615     if (error != 0) {
616 	twe_printf(sc, "failed to delete unit %d\n", unit);
617 	return(error);
618     }
619     bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
620     return(error);
621 }
622 
623 /********************************************************************************
624  * Clear a PCI parity error.
625  */
626 void
627 twe_clear_pci_parity_error(struct twe_softc *sc)
628 {
629     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
630     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
631 }
632 
633 /********************************************************************************
634  * Clear a PCI abort.
635  */
636 void
637 twe_clear_pci_abort(struct twe_softc *sc)
638 {
639     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
640     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
641 }
642 
643 /********************************************************************************
644  ********************************************************************************
645                                                                       Disk device
646  ********************************************************************************
647  ********************************************************************************/
648 
649 /*
650  * Disk device bus interface
651  */
652 static int twed_probe(device_t dev);
653 static int twed_attach(device_t dev);
654 static int twed_detach(device_t dev);
655 
656 static device_method_t twed_methods[] = {
657     DEVMETHOD(device_probe,	twed_probe),
658     DEVMETHOD(device_attach,	twed_attach),
659     DEVMETHOD(device_detach,	twed_detach),
660     DEVMETHOD_END
661 };
662 
663 static driver_t twed_driver = {
664     "twed",
665     twed_methods,
666     sizeof(struct twed_softc)
667 };
668 
669 static devclass_t	twed_devclass;
670 DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, NULL, NULL);
671 
672 /*
673  * Disk device control interface.
674  */
675 static	d_open_t	twed_open;
676 static	d_close_t	twed_close;
677 static	d_strategy_t	twed_strategy;
678 static	d_dump_t	twed_dump;
679 
680 static struct dev_ops twed_ops = {
681 	{ "twed", 0, D_DISK | D_MPSAFE},
682 	.d_open =	twed_open,
683 	.d_close =	twed_close,
684 	.d_read =	physread,
685 	.d_write =	physwrite,
686 	.d_strategy =	twed_strategy,
687 	.d_dump =	twed_dump,
688 };
689 
690 /********************************************************************************
691  * Handle open from generic layer.
692  *
693  * Note that this is typically only called by the diskslice code, and not
694  * for opens on subdevices (eg. slices, partitions).
695  */
696 static int
697 twed_open(struct dev_open_args *ap)
698 {
699     cdev_t dev = ap->a_head.a_dev;
700     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
701 
702     debug_called(4);
703 
704     if (sc == NULL)
705 	return (ENXIO);
706 
707     /* check that the controller is up and running */
708     if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
709 	return(ENXIO);
710 
711     sc->twed_flags |= TWED_OPEN;
712     return (0);
713 }
714 
715 /********************************************************************************
716  * Handle last close of the disk device.
717  */
718 static int
719 twed_close(struct dev_close_args *ap)
720 {
721     cdev_t dev = ap->a_head.a_dev;
722     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
723 
724     debug_called(4);
725 
726     if (sc == NULL)
727 	return (ENXIO);
728 
729     sc->twed_flags &= ~TWED_OPEN;
730     return (0);
731 }
732 
733 /********************************************************************************
734  * Handle an I/O request.
735  */
736 static int
737 twed_strategy(struct dev_strategy_args *ap)
738 {
739     cdev_t dev = ap->a_head.a_dev;
740     struct bio *bio = ap->a_bio;
741     struct twed_softc *sc = dev->si_drv1;
742     struct buf *bp = bio->bio_buf;
743 
744     bio->bio_driver_info = sc;
745 
746     debug_called(4);
747 
748     TWED_BIO_IN;
749 
750     /* bogus disk? */
751     if (sc == NULL || sc->twed_drive->td_disk == NULL) {
752 	bp->b_error = EINVAL;
753 	bp->b_flags |= B_ERROR;
754 	kprintf("twe: bio for invalid disk!\n");
755 	biodone(bio);
756 	TWED_BIO_OUT;
757 	return(0);
758     }
759 
760     /* perform accounting */
761     devstat_start_transaction(&sc->twed_stats);
762 
763     /* queue the bio on the controller */
764     TWE_IO_LOCK(sc->twed_controller);
765     twe_enqueue_bio(sc->twed_controller, bio);
766 
767     /* poke the controller to start I/O */
768     twe_startio(sc->twed_controller);
769     TWE_IO_UNLOCK(sc->twed_controller);
770     return(0);
771 }
772 
773 /********************************************************************************
774  * System crashdump support
775  */
776 static int
777 twed_dump(struct dev_dump_args *ap)
778 {
779     cdev_t dev = ap->a_head.a_dev;
780     size_t length = ap->a_length;
781     off_t offset = ap->a_offset;
782     void *virtual = ap->a_virtual;
783     struct twed_softc	*twed_sc;
784     struct twe_softc	*twe_sc;
785     int			error;
786 
787     twed_sc = dev->si_drv1;
788     if (twed_sc == NULL)
789 	return(ENXIO);
790     twe_sc  = (struct twe_softc *)twed_sc->twed_controller;
791 
792     if (length > 0) {
793 	if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
794 	    return(error);
795     }
796     return(0);
797 }
798 
799 /********************************************************************************
800  * Handle completion of an I/O request.
801  */
802 void
803 twed_intr(struct bio *bio)
804 {
805     struct buf *bp = bio->bio_buf;
806     struct twed_softc *sc = bio->bio_driver_info;
807 
808     debug_called(4);
809 
810     /* if no error, transfer completed */
811     if (!(bp->b_flags & B_ERROR))
812 	bp->b_resid = 0;
813     devstat_end_transaction_buf(&sc->twed_stats, bp);
814     biodone(bio);
815     TWED_BIO_OUT;
816 }
817 
818 /********************************************************************************
819  * Default probe stub.
820  */
821 static int
822 twed_probe(device_t dev)
823 {
824     return (0);
825 }
826 
827 /********************************************************************************
828  * Attach a unit to the controller.
829  */
830 static int
831 twed_attach(device_t dev)
832 {
833     struct twed_softc	*sc;
834     struct disk_info	info;
835     device_t		parent;
836     cdev_t		dsk;
837 
838     debug_called(4);
839 
840     /* initialise our softc */
841     sc = device_get_softc(dev);
842     parent = device_get_parent(dev);
843     sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
844     sc->twed_drive = device_get_ivars(dev);
845     sc->twed_dev = dev;
846 
847     /* report the drive */
848     twed_printf(sc, "%uMB (%u sectors)\n",
849 		sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
850 		sc->twed_drive->td_size);
851 
852     /* attach a generic disk device to ourselves */
853 
854     sc->twed_drive->td_sys_unit = device_get_unit(dev);
855 
856     devstat_add_entry(&sc->twed_stats, "twed", sc->twed_drive->td_sys_unit,
857 			TWE_BLOCK_SIZE,
858 			DEVSTAT_NO_ORDERED_TAGS,
859 			DEVSTAT_TYPE_STORARRAY | DEVSTAT_TYPE_IF_OTHER,
860 			DEVSTAT_PRIORITY_ARRAY);
861 
862     dsk = disk_create(sc->twed_drive->td_sys_unit, &sc->twed_disk, &twed_ops);
863     dsk->si_drv1 = sc;
864     sc->twed_dev_t = dsk;
865 
866     /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */
867     dsk->si_iosize_max = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
868 
869     /*
870      * Set disk info, as it appears that all needed data is available already.
871      * Setting the disk info will also cause the probing to start.
872      */
873     bzero(&info, sizeof(info));
874     info.d_media_blksize    = TWE_BLOCK_SIZE;	/* mandatory */
875     info.d_media_blocks	    = sc->twed_drive->td_size;
876 
877     info.d_type		= DTYPE_ESDI;		/* optional */
878     info.d_secpertrack	= sc->twed_drive->td_sectors;
879     info.d_nheads	= sc->twed_drive->td_heads;
880     info.d_ncylinders	= sc->twed_drive->td_cylinders;
881     info.d_secpercyl	= sc->twed_drive->td_sectors * sc->twed_drive->td_heads;
882 
883     disk_setdiskinfo(&sc->twed_disk, &info);
884 
885     return (0);
886 }
887 
888 /********************************************************************************
889  * Disconnect ourselves from the system.
890  */
891 static int
892 twed_detach(device_t dev)
893 {
894     struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
895 
896     debug_called(4);
897 
898     if (sc->twed_flags & TWED_OPEN)
899 	return(EBUSY);
900 
901     devstat_remove_entry(&sc->twed_stats);
902     disk_destroy(&sc->twed_disk);
903 
904     return(0);
905 }
906 
907 /********************************************************************************
908  ********************************************************************************
909                                                                              Misc
910  ********************************************************************************
911  ********************************************************************************/
912 
913 /********************************************************************************
914  * Allocate a command buffer
915  */
916 static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
917 
918 struct twe_request *
919 twe_allocate_request(struct twe_softc *sc, int tag)
920 {
921     struct twe_request	*tr;
922     int aligned_size;
923 
924     /*
925      * TWE requires requests to be 512-byte aligned.  Depend on malloc()
926      * guarenteeing alignment for power-of-2 requests.  Note that the old
927      * (FreeBSD-4.x) malloc code aligned all requests, but the new slab
928      * allocator only guarentees same-size alignment for power-of-2 requests.
929      */
930     aligned_size = (sizeof(struct twe_request) + TWE_ALIGNMASK) &
931 	~TWE_ALIGNMASK;
932     tr = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT | M_ZERO);
933     tr->tr_sc = sc;
934     tr->tr_tag = tag;
935     if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
936 	twe_free_request(tr);
937 	twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
938 	return(NULL);
939     }
940     return(tr);
941 }
942 
943 /********************************************************************************
944  * Permanently discard a command buffer.
945  */
946 void
947 twe_free_request(struct twe_request *tr)
948 {
949     struct twe_softc	*sc = tr->tr_sc;
950 
951     debug_called(4);
952 
953     bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
954     kfree(tr, TWE_MALLOC_CLASS);
955 }
956 
957 /********************************************************************************
958  * Map/unmap (tr)'s command and data in the controller's addressable space.
959  *
960  * These routines ensure that the data which the controller is going to try to
961  * access is actually visible to the controller, in a machine-independant
962  * fashion.  Due to a hardware limitation, I/O buffers must be 512-byte aligned
963  * and we take care of that here as well.
964  */
965 static void
966 twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
967 {
968     int i;
969 
970     for (i = 0; i < nsegments; i++) {
971 	sgl[i].address = segs[i].ds_addr;
972 	sgl[i].length = segs[i].ds_len;
973     }
974     for (; i < max_sgl; i++) {				/* XXX necessary? */
975 	sgl[i].address = 0;
976 	sgl[i].length = 0;
977     }
978 }
979 
/*
 * bus_dmamap_load() callback: fill in the command's scatter/gather list from
 * the bus-dma segments, sync the data buffer for the transfer direction, and
 * hand the command to the controller.
 */
static void
twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct twe_request	*tr = (struct twe_request *)arg;
    struct twe_softc	*sc = tr->tr_sc;
    TWE_Command		*cmd = TWE_FIND_COMMAND(tr);

    debug_called(4);

    /* a request must never be mapped twice */
    if (tr->tr_flags & TWE_CMD_MAPPED)
	panic("already mapped command");

    tr->tr_flags |= TWE_CMD_MAPPED;

    /* a deferred (EINPROGRESS) load froze the controller; thaw it now */
    if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
	sc->twe_state &= ~TWE_STATE_FRZN;
    /* save base of first segment in command (applicable if there is only one segment) */
    tr->tr_dataphys = segs[0].ds_addr;

    /* correct command size for s/g list size (2 size units per segment entry) */
    cmd->generic.size += 2 * nsegments;

    /*
     * Due to the fact that parameter and I/O commands have the scatter/gather list in
     * different places, we need to determine which sort of command this actually is
     * before we can populate it correctly.
     */
    switch(cmd->generic.opcode) {
    case TWE_OP_GET_PARAM:
    case TWE_OP_SET_PARAM:
	cmd->generic.sgl_offset = 2;
	twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
	break;
    case TWE_OP_READ:
    case TWE_OP_WRITE:
	cmd->generic.sgl_offset = 3;
	twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
	break;
    case TWE_OP_ATA_PASSTHROUGH:
	cmd->generic.sgl_offset = 5;
	twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
	break;
    default:
	/*
	 * Fall back to what the linux driver does.
	 * Do this because the API may send an opcode
	 * the driver knows nothing about and this will
	 * at least stop PCIABRT's from hosing us.
	 * Here the caller's sgl_offset is trusted to pick the layout.
	 */
	switch (cmd->generic.sgl_offset) {
	case 2:
	    twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
	    break;
	case 3:
	    twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
	    break;
	case 5:
	    twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
	    break;
	}
    }

    /* reads: make the buffer coherent before the controller DMAs into it */
    if (tr->tr_flags & TWE_CMD_DATAIN) {
	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
			    BUS_DMASYNC_PREREAD);
	} else {
	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
			    BUS_DMASYNC_PREREAD);
	}
    }

    if (tr->tr_flags & TWE_CMD_DATAOUT) {
	/*
	 * if we're using an alignment buffer, and we're writing data
	 * copy the real data out
	 */
	if (tr->tr_flags & TWE_CMD_ALIGNBUF)
	    bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);

	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
			    BUS_DMASYNC_PREWRITE);
	} else {
	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
			    BUS_DMASYNC_PREWRITE);
	}
    }

    /* submit; if the controller queue is full, mark it busy and requeue */
    if (twe_start(tr) == EBUSY) {
	tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
	twe_requeue_ready(tr);
    }
}
1074 
/*
 * bus_dmamap_load() callback for the command-block area: record its bus
 * address so commands can be handed to the controller by physical address.
 */
static void
twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct twe_softc	*sc = (struct twe_softc *)arg;

    debug_called(4);

    /* command can't cross a page boundary, so the first segment covers it all */
    sc->twe_cmdphys = segs[0].ds_addr;
}
1085 
1086 int
1087 twe_map_request(struct twe_request *tr)
1088 {
1089     struct twe_softc	*sc = tr->tr_sc;
1090     int			error = 0;
1091 
1092     debug_called(4);
1093 
1094     twe_lockassert(&sc->twe_io_lock);
1095     if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
1096 	twe_requeue_ready(tr);
1097 	return (EBUSY);
1098     }
1099 
1100     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
1101 
1102     /*
1103      * If the command involves data, map that too.
1104      */
1105     if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
1106 
1107 	/*
1108 	 * Data must be 512-byte aligned; allocate a fixup buffer if it's not.
1109 	 *
1110 	 * DragonFly's malloc only guarentees alignment for requests which
1111 	 * are power-of-2 sized.
1112 	 */
1113 	if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
1114 	    int aligned_size;
1115 
1116 	    tr->tr_realdata = tr->tr_data;	/* save pointer to 'real' data */
1117 	    aligned_size = TWE_ALIGNMENT;
1118 	    while (aligned_size < tr->tr_length)
1119 		aligned_size <<= 1;
1120 	    tr->tr_flags |= TWE_CMD_ALIGNBUF;
1121 	    tr->tr_data = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT);
1122 	    if (tr->tr_data == NULL) {
1123 		twe_printf(sc, "%s: malloc failed\n", __func__);
1124 		tr->tr_data = tr->tr_realdata; /* restore original data pointer */
1125 		return(ENOMEM);
1126 	    }
1127 	}
1128 
1129 	/*
1130 	 * Map the data buffer into bus space and build the s/g list.
1131 	 */
1132 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1133 	    error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
1134 			    tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
1135 	} else {
1136 	    error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length,
1137 				    twe_setup_data_dmamap, tr, 0);
1138 	}
1139 	if (error == EINPROGRESS) {
1140 	    tr->tr_flags |= TWE_CMD_IN_PROGRESS;
1141 	    sc->twe_state |= TWE_STATE_FRZN;
1142 	    error = 0;
1143 	}
1144     } else
1145 	if ((error = twe_start(tr)) == EBUSY) {
1146 	    sc->twe_state |= TWE_STATE_CTLR_BUSY;
1147 	    twe_requeue_ready(tr);
1148 	}
1149 
1150     return(error);
1151 }
1152 
1153 void
1154 twe_unmap_request(struct twe_request *tr)
1155 {
1156     struct twe_softc	*sc = tr->tr_sc;
1157 
1158     debug_called(4);
1159 
1160     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
1161 
1162     /*
1163      * If the command involved data, unmap that too.
1164      */
1165     if (tr->tr_data != NULL) {
1166 	if (tr->tr_flags & TWE_CMD_DATAIN) {
1167 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1168 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1169 				BUS_DMASYNC_POSTREAD);
1170 	    } else {
1171 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1172 				BUS_DMASYNC_POSTREAD);
1173 	    }
1174 
1175 	    /* if we're using an alignment buffer, and we're reading data, copy the real data in */
1176 	    if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1177 		bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
1178 	}
1179 	if (tr->tr_flags & TWE_CMD_DATAOUT) {
1180 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1181 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1182 				BUS_DMASYNC_POSTWRITE);
1183 	    } else {
1184 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1185 				BUS_DMASYNC_POSTWRITE);
1186 	    }
1187 	}
1188 
1189 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1190 	    bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
1191 	} else {
1192 	    bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap);
1193 	}
1194     }
1195 
1196     /* free alignment buffer if it was used */
1197     if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
1198 	kfree(tr->tr_data, TWE_MALLOC_CLASS);
1199 	tr->tr_data = tr->tr_realdata;		/* restore 'real' data pointer */
1200     }
1201 }
1202 
1203 #ifdef TWE_DEBUG
1204 void twe_report(void);
1205 /********************************************************************************
1206  * Print current controller status, call from DDB.
1207  */
1208 void
1209 twe_report(void)
1210 {
1211     struct twe_softc	*sc;
1212     int			i;
1213 
1214     for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++)
1215 	twe_print_controller(sc);
1216     kprintf("twed: total bio count in %u  out %u\n", twed_bio_in, twed_bio_out);
1217 }
1218 #endif
1219