xref: /dragonfly/sys/dev/raid/twe/twe_freebsd.c (revision 267c04fd)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2003 Paul Saab
4  * Copyright (c) 2003 Vinod Kashyap
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/twe/twe_freebsd.c,v 1.54 2012/11/17 01:52:19 svnexp Exp $
30  */
31 
32 /*
33  * FreeBSD-specific code.
34  */
35 
36 #include <dev/raid/twe/twe_compat.h>
37 #include <dev/raid/twe/twereg.h>
38 #include <dev/raid/twe/tweio.h>
39 #include <dev/raid/twe/twevar.h>
40 #include <dev/raid/twe/twe_tables.h>
41 #include <sys/dtype.h>
42 #include <sys/mplock2.h>
43 
44 #include <vm/vm.h>
45 
46 static devclass_t	twe_devclass;
47 
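/*
 * Debug-only I/O accounting: TWED_BIO_IN/TWED_BIO_OUT count bios entering
 * and completing in the disk layer, and are printed by twe_report() from
 * DDB; without TWE_DEBUG they compile away to nothing.
 */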
48 #ifdef TWE_DEBUG
49 static u_int32_t	twed_bio_in;
50 #define TWED_BIO_IN	twed_bio_in++
51 static u_int32_t	twed_bio_out;
52 #define TWED_BIO_OUT	twed_bio_out++
53 #else
54 #define TWED_BIO_IN
55 #define TWED_BIO_OUT
56 #endif
57 
58 static void	twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
59 static void	twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
60 
61 /********************************************************************************
62  ********************************************************************************
63                                                          Control device interface
64  ********************************************************************************
65  ********************************************************************************/
66 
67 static	d_open_t		twe_open;
68 static	d_close_t		twe_close;
69 static	d_ioctl_t		twe_ioctl_wrapper;
70 
71 static struct dev_ops twe_ops = {
72 	{ "twe", 0, D_MPSAFE },
73 	.d_open =	twe_open,
74 	.d_close =	twe_close,
75 	.d_ioctl =	twe_ioctl_wrapper,
76 };
77 
78 /********************************************************************************
79  * Accept an open operation on the control device.
80  */
81 static int
82 twe_open(struct dev_open_args *ap)
83 {
84     cdev_t			dev = ap->a_head.a_dev;
85     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
86 
87     TWE_IO_LOCK(sc);
88     if (sc->twe_state & TWE_STATE_DETACHING) {
89 	TWE_IO_UNLOCK(sc);
90 	return (ENXIO);
91     }
92     sc->twe_state |= TWE_STATE_OPEN;
93     TWE_IO_UNLOCK(sc);
94     return(0);
95 }
96 
97 /********************************************************************************
98  * Accept the last close on the control device.
99  */
100 static int
101 twe_close(struct dev_close_args *ap)
102 {
103     cdev_t			dev = ap->a_head.a_dev;
104     struct twe_softc		*sc = (struct twe_softc *)dev->si_drv1;
105 
106     TWE_IO_LOCK(sc);
107     sc->twe_state &= ~TWE_STATE_OPEN;
108     TWE_IO_UNLOCK(sc);
109     return (0);
110 }
111 
112 /********************************************************************************
113  * Handle controller-specific control operations.
114  */
115 static int
116 twe_ioctl_wrapper(struct dev_ioctl_args *ap)
117 {
118     cdev_t dev = ap->a_head.a_dev;
119     u_long cmd = ap->a_cmd;
120     caddr_t addr = ap->a_data;
121     struct twe_softc *sc = (struct twe_softc *)dev->si_drv1;
122 
123     return(twe_ioctl(sc, cmd, addr));
124 }
125 
126 /********************************************************************************
127  ********************************************************************************
128                                                              PCI device interface
129  ********************************************************************************
130  ********************************************************************************/
131 
132 static int	twe_probe(device_t dev);
133 static int	twe_attach(device_t dev);
134 static void	twe_free(struct twe_softc *sc);
135 static int	twe_detach(device_t dev);
136 static int	twe_shutdown(device_t dev);
137 static int	twe_suspend(device_t dev);
138 static int	twe_resume(device_t dev);
139 static void	twe_pci_intr(void *arg);
140 static void	twe_intrhook(void *arg);
141 
142 static device_method_t twe_methods[] = {
143     /* Device interface */
144     DEVMETHOD(device_probe,	twe_probe),
145     DEVMETHOD(device_attach,	twe_attach),
146     DEVMETHOD(device_detach,	twe_detach),
147     DEVMETHOD(device_shutdown,	twe_shutdown),
148     DEVMETHOD(device_suspend,	twe_suspend),
149     DEVMETHOD(device_resume,	twe_resume),
150 
151     DEVMETHOD_END
152 };
153 
154 static driver_t twe_pci_driver = {
155 	"twe",
156 	twe_methods,
157 	sizeof(struct twe_softc)
158 };
159 
160 DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, NULL, NULL);
161 
162 /********************************************************************************
163  * Match a 3ware Escalade ATA RAID controller.
164  */
165 static int
166 twe_probe(device_t dev)
167 {
168 
169     debug_called(4);
170 
171     if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
172 	((pci_get_device(dev) == TWE_DEVICE_ID) ||
173 	 (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
174 	device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
175 	return(BUS_PROBE_DEFAULT);
176     }
177     return(ENXIO);
178 }
179 
180 /********************************************************************************
181  * Allocate resources, initialise the controller.
182  */
183 static int
184 twe_attach(device_t dev)
185 {
186     struct twe_softc	*sc;
187     int			rid, error;
188 
189     debug_called(4);
190 
191     /*
192      * Initialise the softc structure.
193      */
194     sc = device_get_softc(dev);
195     sc->twe_dev = dev;
196     lockinit(&sc->twe_io_lock, "twe I/O", 0, LK_CANRECURSE);
197     lockinit(&sc->twe_config_lock, "twe config", 0, LK_CANRECURSE);
198 
199     SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
200 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
201 	OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
202 	"TWE driver version");
203 
204     /*
205      * Force the busmaster enable bit on, in case the BIOS forgot.
206      */
207     pci_enable_busmaster(dev);
208 
209     /*
210      * Allocate the PCI register window.
211      */
212     rid = TWE_IO_CONFIG_REG;
213     if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
214         RF_ACTIVE)) == NULL) {
215 	twe_printf(sc, "can't allocate register window\n");
216 	twe_free(sc);
217 	return(ENXIO);
218     }
219 
220     /*
221      * Allocate the parent bus DMA tag appropriate for PCI.
222      */
223     if (bus_dma_tag_create(NULL, 				/* parent */
224 			   1, 0, 				/* alignment, boundary */
225 			   BUS_SPACE_MAXADDR_32BIT, 		/* lowaddr */
226 			   BUS_SPACE_MAXADDR, 			/* highaddr */
227 			   NULL, NULL, 				/* filter, filterarg */
228 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,	/* maxsize, nsegments */
229 			   BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
230 			   0,					/* flags */
231 			   &sc->twe_parent_dmat)) {
232 	twe_printf(sc, "can't allocate parent DMA tag\n");
233 	twe_free(sc);
234 	return(ENOMEM);
235     }
236 
237     /*
238      * Allocate and connect our interrupt.
239      */
240     rid = 0;
241     if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
242         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
243 	twe_printf(sc, "can't allocate interrupt\n");
244 	twe_free(sc);
245 	return(ENXIO);
246     }
247     if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_MPSAFE,
248 			twe_pci_intr, sc, &sc->twe_intr, NULL)) {
249 	twe_printf(sc, "can't set up interrupt\n");
250 	twe_free(sc);
251 	return(ENXIO);
252     }
253 
254     /*
255  * Create DMA tag for mapping commands into controller-addressable space.
256      */
257     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
258 			   1, 0, 			/* alignment, boundary */
259 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
260 			   BUS_SPACE_MAXADDR, 		/* highaddr */
261 			   NULL, NULL, 			/* filter, filterarg */
262 			   sizeof(TWE_Command) *
263 			   TWE_Q_LENGTH, 1,		/* maxsize, nsegments */
264 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
265 			   0,				/* flags */
266 			   &sc->twe_cmd_dmat)) {
267 	twe_printf(sc, "can't allocate command DMA tag\n");
268 	twe_free(sc);
269 	return(ENOMEM);
270     }
271     /*
272      * Allocate memory and make it available for DMA.
273      */
274     if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
275 			 BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
276 	twe_printf(sc, "can't allocate command memory\n");
277 	return(ENOMEM);
278     }
279     bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
280 		    sizeof(TWE_Command) * TWE_Q_LENGTH,
281 		    twe_setup_request_dmamap, sc, 0);
282     bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
283 
284     /*
285      * Create DMA tag for mapping data buffers into controller-addressable space.
286      */
287     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
288 			   1, 0, 			/* alignment, boundary */
289 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
290 			   BUS_SPACE_MAXADDR, 		/* highaddr */
291 			   NULL, NULL, 			/* filter, filterarg */
292 			   MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
293 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
294 			   BUS_DMA_ALLOCNOW,		/* flags */
295 			   &sc->twe_buffer_dmat)) {
296 	twe_printf(sc, "can't allocate data buffer DMA tag\n");
297 	twe_free(sc);
298 	return(ENOMEM);
299     }
300 
301     /*
302      * Create DMA tag for mapping immediate request buffers into controller-addressable space.
303      */
304     if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
305 			   1, 0, 			/* alignment, boundary */
306 			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
307 			   BUS_SPACE_MAXADDR, 		/* highaddr */
308 			   NULL, NULL, 			/* filter, filterarg */
309 			   MAXBSIZE, 1,			/* maxsize, nsegments */
310 			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
311 			   0,				/* flags */
312 			   &sc->twe_immediate_dmat)) {
313 	twe_printf(sc, "can't allocate immediate request DMA tag\n");
314 	twe_free(sc);
315 	return(ENOMEM);
316     }
317     /*
318      * Allocate memory for requests which cannot sleep or support continuation.
319      */
320      if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
321 			  BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
322 	twe_printf(sc, "can't allocate memory for immediate requests\n");
323 	return(ENOMEM);
324      }
325 
326     /*
327      * Initialise the controller and driver core.
328      */
329     if ((error = twe_setup(sc))) {
330 	twe_free(sc);
331 	return(error);
332     }
333 
334     /*
335      * Print some information about the controller and configuration.
336      */
337     twe_describe_controller(sc);
338 
339     /*
340      * Create the control device.
341      */
342     sc->twe_dev_t = make_dev(&twe_ops, device_get_unit(sc->twe_dev),
343 			     UID_ROOT, GID_OPERATOR,
344 			     S_IRUSR | S_IWUSR, "twe%d",
345 			     device_get_unit(sc->twe_dev));
346     sc->twe_dev_t->si_drv1 = sc;
347 
348     /*
349      * Schedule ourselves to bring the controller up once interrupts are
350      * available.  This isn't strictly necessary, since we disable
351      * interrupts while probing the controller, but it is more in keeping
352      * with common practice for other disk devices.
353      */
354     sc->twe_ich.ich_func = twe_intrhook;
355     sc->twe_ich.ich_arg = sc;
356     sc->twe_ich.ich_desc = "twe";
357     if (config_intrhook_establish(&sc->twe_ich) != 0) {
358 	twe_printf(sc, "can't establish configuration hook\n");
359 	twe_free(sc);
360 	return(ENXIO);
361     }
362 
363     return(0);
364 }
365 
366 /********************************************************************************
367  * Free all of the resources associated with (sc).
368  *
369  * Should not be called if the controller is active.
370  */
371 static void
372 twe_free(struct twe_softc *sc)
373 {
374     struct twe_request	*tr;
375 
376     debug_called(4);
377 
378     /* throw away any command buffers */
379     while ((tr = twe_dequeue_free(sc)) != NULL)
380 	twe_free_request(tr);
381 
382     if (sc->twe_cmd != NULL) {
383 	bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
384 	bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
385     }
386 
387     if (sc->twe_immediate != NULL) {
388 	bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
389 	bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
390 			sc->twe_immediate_map);
391     }
392 
393     if (sc->twe_immediate_dmat)
394 	bus_dma_tag_destroy(sc->twe_immediate_dmat);
395 
396     /* destroy the data-transfer DMA tag */
397     if (sc->twe_buffer_dmat)
398 	bus_dma_tag_destroy(sc->twe_buffer_dmat);
399 
400     /* disconnect the interrupt handler */
401     if (sc->twe_intr)
402 	bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
403     if (sc->twe_irq != NULL)
404 	bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
405 
406     /* destroy the parent DMA tag */
407     if (sc->twe_parent_dmat)
408 	bus_dma_tag_destroy(sc->twe_parent_dmat);
409 
410     /* release the register window mapping */
411     if (sc->twe_io != NULL)
412 	bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
413 
414     /* destroy control device */
415     if (sc->twe_dev_t != NULL)
416 	destroy_dev(sc->twe_dev_t);
417     dev_ops_remove_minor(&twe_ops, device_get_unit(sc->twe_dev));
418 
419     lockuninit(&sc->twe_config_lock);
420     lockuninit(&sc->twe_io_lock);
421 }
422 
423 /********************************************************************************
424  * Disconnect from the controller completely, in preparation for unload.
425  */
426 static int
427 twe_detach(device_t dev)
428 {
429     struct twe_softc	*sc = device_get_softc(dev);
430 
431     debug_called(4);
432 
433     TWE_IO_LOCK(sc);
434     if (sc->twe_state & TWE_STATE_OPEN) {
435 	TWE_IO_UNLOCK(sc);
436 	return (EBUSY);
437     }
438     sc->twe_state |= TWE_STATE_DETACHING;
439     TWE_IO_UNLOCK(sc);
440 
441     /*
442      * Shut the controller down.
443      */
444     if (twe_shutdown(dev)) {
445 	TWE_IO_LOCK(sc);
446 	sc->twe_state &= ~TWE_STATE_DETACHING;
447 	TWE_IO_UNLOCK(sc);
448 	return (EBUSY);
449     }
450 
451     twe_free(sc);
452 
453     return(0);
454 }
455 
456 /********************************************************************************
457  * Bring the controller down to a dormant state and detach all child devices.
458  *
459  * Note that we can assume that the bioq on the controller is empty, as we won't
460  * allow shutdown if any device is open.
461  */
462 static int
463 twe_shutdown(device_t dev)
464 {
465     struct twe_softc	*sc = device_get_softc(dev);
466     int			i, error = 0;
467 
468     debug_called(4);
469 
470     /*
471      * Delete all our child devices.
472      */
473     TWE_CONFIG_LOCK(sc);
474     for (i = 0; i < TWE_MAX_UNITS; i++) {
475 	if (sc->twe_drive[i].td_disk != NULL) {
476 	    if ((error = twe_detach_drive(sc, i)) != 0) {
477 		TWE_CONFIG_UNLOCK(sc);
478 		return (error);
479 	    }
480 	}
481     }
482     TWE_CONFIG_UNLOCK(sc);
483 
484     /*
485      * Bring the controller down.
486      */
487     TWE_IO_LOCK(sc);
488     twe_deinit(sc);
489     TWE_IO_UNLOCK(sc);
490 
491     return(0);
492 }
493 
494 /********************************************************************************
495  * Bring the controller to a quiescent state, ready for system suspend.
496  */
497 static int
498 twe_suspend(device_t dev)
499 {
500     struct twe_softc	*sc = device_get_softc(dev);
501 
502     debug_called(4);
503 
504     TWE_IO_LOCK(sc);
505     sc->twe_state |= TWE_STATE_SUSPEND;
506     TWE_IO_UNLOCK(sc);
507 
508     twe_disable_interrupts(sc);
510 
511     return(0);
512 }
513 
514 /********************************************************************************
515  * Bring the controller back to a state ready for operation.
516  */
517 static int
518 twe_resume(device_t dev)
519 {
520     struct twe_softc	*sc = device_get_softc(dev);
521 
522     debug_called(4);
523 
524     TWE_IO_LOCK(sc);
525     sc->twe_state &= ~TWE_STATE_SUSPEND;
526     twe_enable_interrupts(sc);
527     TWE_IO_UNLOCK(sc);
528 
529     return(0);
530 }
531 
532 /*******************************************************************************
533  * Take an interrupt, or be poked by other code to look for interrupt-worthy
534  * status.
535  */
536 static void
537 twe_pci_intr(void *arg)
538 {
539     struct twe_softc *sc = arg;
540 
541     TWE_IO_LOCK(sc);
542     twe_intr(sc);
543     TWE_IO_UNLOCK(sc);
544 }
545 
546 /********************************************************************************
547  * Delayed-startup hook
548  */
549 static void
550 twe_intrhook(void *arg)
551 {
552     struct twe_softc		*sc = (struct twe_softc *)arg;
553 
554     /* pull ourselves off the intrhook chain */
555     config_intrhook_disestablish(&sc->twe_ich);
556 
557     /* call core startup routine */
558     twe_init(sc);
559 }
560 
561 /********************************************************************************
562  * Given a detected drive, attach it to the bio interface.
563  *
564  * This is called from twe_add_unit.
565  */
566 int
567 twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
568 {
569     char	buf[80];
570     int		error;
571 
572     get_mplock();
573     dr->td_disk = device_add_child(sc->twe_dev, NULL, -1);
574     if (dr->td_disk == NULL) {
575 	rel_mplock();
576 	twe_printf(sc, "Cannot add unit\n");
577 	return (EIO);
578     }
579     device_set_ivars(dr->td_disk, dr);
580 
581     /*
582      * XXX It would make sense to test the online/initialising bits, but they seem to be
583      * always set...
584      */
585     ksprintf(buf, "Unit %d, %s, %s",
586 	    dr->td_twe_unit,
587 	    twe_describe_code(twe_table_unittype, dr->td_type),
588 	    twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
589     device_set_desc_copy(dr->td_disk, buf);
590 
591     error = device_probe_and_attach(dr->td_disk);
592     rel_mplock();
593     if (error != 0) {
594 	twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
595 	return (EIO);
596     }
597     return (0);
598 }
599 
600 /********************************************************************************
601  * Detach the specified unit if it exists.
602  *
603  * This is called from twe_del_unit.
604  */
605 int
606 twe_detach_drive(struct twe_softc *sc, int unit)
607 {
608     int error = 0;
609 
610     TWE_CONFIG_ASSERT_LOCKED(sc);
611     get_mplock();
612     error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
613     rel_mplock();
614     if (error != 0) {
615 	twe_printf(sc, "failed to delete unit %d\n", unit);
616 	return(error);
617     }
618     bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
619     return(error);
620 }
621 
622 /********************************************************************************
623  * Clear a PCI parity error.
624  */
625 void
626 twe_clear_pci_parity_error(struct twe_softc *sc)
627 {
628     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
629     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
630 }
631 
632 /********************************************************************************
633  * Clear a PCI abort.
634  */
635 void
636 twe_clear_pci_abort(struct twe_softc *sc)
637 {
638     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
639     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
640 }
641 
642 /********************************************************************************
643  ********************************************************************************
644                                                                       Disk device
645  ********************************************************************************
646  ********************************************************************************/
647 
648 /*
649  * Disk device bus interface
650  */
651 static int twed_probe(device_t dev);
652 static int twed_attach(device_t dev);
653 static int twed_detach(device_t dev);
654 
655 static device_method_t twed_methods[] = {
656     DEVMETHOD(device_probe,	twed_probe),
657     DEVMETHOD(device_attach,	twed_attach),
658     DEVMETHOD(device_detach,	twed_detach),
659     DEVMETHOD_END
660 };
661 
662 static driver_t twed_driver = {
663     "twed",
664     twed_methods,
665     sizeof(struct twed_softc)
666 };
667 
668 static devclass_t	twed_devclass;
669 DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, NULL, NULL);
670 
671 /*
672  * Disk device control interface.
673  */
674 static	d_open_t	twed_open;
675 static	d_close_t	twed_close;
676 static	d_strategy_t	twed_strategy;
677 static	d_dump_t	twed_dump;
678 
679 static struct dev_ops twed_ops = {
680 	{ "twed", 0, D_DISK | D_MPSAFE},
681 	.d_open =	twed_open,
682 	.d_close =	twed_close,
683 	.d_read =	physread,
684 	.d_write =	physwrite,
685 	.d_strategy =	twed_strategy,
686 	.d_dump =	twed_dump,
687 };
688 
689 /********************************************************************************
690  * Handle open from generic layer.
691  *
692  * Note that this is typically only called by the diskslice code, and not
693  * for opens on subdevices (e.g. slices, partitions).
694  */
695 static int
696 twed_open(struct dev_open_args *ap)
697 {
698     cdev_t dev = ap->a_head.a_dev;
699     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
700 
701     debug_called(4);
702 
703     if (sc == NULL)
704 	return (ENXIO);
705 
706     /* check that the controller is up and running */
707     if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
708 	return(ENXIO);
709 
710     sc->twed_flags |= TWED_OPEN;
711     return (0);
712 }
713 
714 /********************************************************************************
715  * Handle last close of the disk device.
716  */
717 static int
718 twed_close(struct dev_close_args *ap)
719 {
720     cdev_t dev = ap->a_head.a_dev;
721     struct twed_softc	*sc = (struct twed_softc *)dev->si_drv1;
722 
723     debug_called(4);
724 
725     if (sc == NULL)
726 	return (ENXIO);
727 
728     sc->twed_flags &= ~TWED_OPEN;
729     return (0);
730 }
731 
732 /********************************************************************************
733  * Handle an I/O request.
734  */
735 static int
736 twed_strategy(struct dev_strategy_args *ap)
737 {
738     cdev_t dev = ap->a_head.a_dev;
739     struct bio *bio = ap->a_bio;
740     struct twed_softc *sc = dev->si_drv1;
741     struct buf *bp = bio->bio_buf;
742 
743     bio->bio_driver_info = sc;
744 
745     debug_called(4);
746 
747     TWED_BIO_IN;
748 
749     /* bogus disk? */
750     if (sc == NULL || sc->twed_drive->td_disk == NULL) {
751 	bp->b_error = EINVAL;
752 	bp->b_flags |= B_ERROR;
753 	kprintf("twe: bio for invalid disk!\n");
754 	biodone(bio);
755 	TWED_BIO_OUT;
756 	return(0);
757     }
758 
759     /* perform accounting */
760     devstat_start_transaction(&sc->twed_stats);
761 
762     /* queue the bio on the controller */
763     TWE_IO_LOCK(sc->twed_controller);
764     twe_enqueue_bio(sc->twed_controller, bio);
765 
766     /* poke the controller to start I/O */
767     twe_startio(sc->twed_controller);
768     TWE_IO_UNLOCK(sc->twed_controller);
769     return(0);
770 }
771 
772 /********************************************************************************
773  * System crashdump support
774  */
775 static int
776 twed_dump(struct dev_dump_args *ap)
777 {
778     cdev_t dev = ap->a_head.a_dev;
779     size_t length = ap->a_length;
780     off_t offset = ap->a_offset;
781     void *virtual = ap->a_virtual;
782     struct twed_softc	*twed_sc;
783     struct twe_softc	*twe_sc;
784     int			error;
785 
786     twed_sc = dev->si_drv1;
787     if (twed_sc == NULL)
788 	return(ENXIO);
789     twe_sc  = (struct twe_softc *)twed_sc->twed_controller;
790 
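    /*
     * The dump request arrives as a byte offset and length; convert both
     * to TWE_BLOCK_SIZE units for twe_dump_blocks().
     */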
791     if (length > 0) {
792 	if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
793 	    return(error);
794     }
795     return(0);
796 }
797 
798 /********************************************************************************
799  * Handle completion of an I/O request.
800  */
801 void
802 twed_intr(struct bio *bio)
803 {
804     struct buf *bp = bio->bio_buf;
805     struct twed_softc *sc = bio->bio_driver_info;
806 
807     debug_called(4);
808 
809     /* if no error, transfer completed */
810     if (!(bp->b_flags & B_ERROR))
811 	bp->b_resid = 0;
812     devstat_end_transaction_buf(&sc->twed_stats, bp);
813     biodone(bio);
814     TWED_BIO_OUT;
815 }
816 
817 /********************************************************************************
818  * Default probe stub.
819  */
820 static int
821 twed_probe(device_t dev)
822 {
823     return (0);
824 }
825 
826 /********************************************************************************
827  * Attach a unit to the controller.
828  */
829 static int
830 twed_attach(device_t dev)
831 {
832     struct twed_softc	*sc;
833     struct disk_info	info;
834     device_t		parent;
835     cdev_t		dsk;
836 
837     debug_called(4);
838 
839     /* initialise our softc */
840     sc = device_get_softc(dev);
841     parent = device_get_parent(dev);
842     sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
843     sc->twed_drive = device_get_ivars(dev);
844     sc->twed_dev = dev;
845 
846     /* report the drive */
847     twed_printf(sc, "%uMB (%u sectors)\n",
848 		sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
849 		sc->twed_drive->td_size);
850 
851     /* attach a generic disk device to ourselves */
852 
853     sc->twed_drive->td_sys_unit = device_get_unit(dev);
854 
855     devstat_add_entry(&sc->twed_stats, "twed", sc->twed_drive->td_sys_unit,
856 			TWE_BLOCK_SIZE,
857 			DEVSTAT_NO_ORDERED_TAGS,
858 			DEVSTAT_TYPE_STORARRAY | DEVSTAT_TYPE_IF_OTHER,
859 			DEVSTAT_PRIORITY_ARRAY);
860 
861     dsk = disk_create(sc->twed_drive->td_sys_unit, &sc->twed_disk, &twed_ops);
862     dsk->si_drv1 = sc;
863     sc->twed_dev_t = dsk;
864 
865     /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */
866     dsk->si_iosize_max = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
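    /*
     * (A worst-case transfer of that size that does not start on a page
     * boundary still spans at most TWE_MAX_SGL_LENGTH pages, so it always
     * fits in the S/G list.)
     */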
867 
868     /*
869      * Set disk info, as it appears that all needed data is available already.
870      * Setting the disk info will also cause the probing to start.
871      */
872     bzero(&info, sizeof(info));
873     info.d_media_blksize    = TWE_BLOCK_SIZE;	/* mandatory */
874     info.d_media_blocks	    = sc->twed_drive->td_size;
875 
876     info.d_type		= DTYPE_ESDI;		/* optional */
877     info.d_secpertrack	= sc->twed_drive->td_sectors;
878     info.d_nheads	= sc->twed_drive->td_heads;
879     info.d_ncylinders	= sc->twed_drive->td_cylinders;
880     info.d_secpercyl	= sc->twed_drive->td_sectors * sc->twed_drive->td_heads;
881 
882     disk_setdiskinfo(&sc->twed_disk, &info);
883 
884     return (0);
885 }
886 
887 /********************************************************************************
888  * Disconnect ourselves from the system.
889  */
890 static int
891 twed_detach(device_t dev)
892 {
893     struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
894 
895     debug_called(4);
896 
897     if (sc->twed_flags & TWED_OPEN)
898 	return(EBUSY);
899 
900     devstat_remove_entry(&sc->twed_stats);
901     disk_destroy(&sc->twed_disk);
902 
903     return(0);
904 }
905 
906 /********************************************************************************
907  ********************************************************************************
908                                                                              Misc
909  ********************************************************************************
910  ********************************************************************************/
911 
912 /********************************************************************************
913  * Allocate a command buffer
914  */
915 static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
916 
917 struct twe_request *
918 twe_allocate_request(struct twe_softc *sc, int tag)
919 {
920     struct twe_request	*tr;
921     int aligned_size;
922 
923     /*
924      * TWE requires requests to be 512-byte aligned.  Depend on malloc()
925      * guaranteeing alignment for power-of-2 requests.  Note that the old
926      * (FreeBSD-4.x) malloc code aligned all requests, but the new slab
927      * allocator only guarantees same-size alignment for power-of-2 requests.
928      */
929     aligned_size = (sizeof(struct twe_request) + TWE_ALIGNMASK) &
930 	~TWE_ALIGNMASK;
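    /*
     * This rounds the request size up to the next multiple of TWE_ALIGNMENT;
     * e.g. assuming TWE_ALIGNMASK == 511, a 1032-byte structure would yield
     * a 1536-byte allocation.
     */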
931     tr = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT | M_ZERO);
932     tr->tr_sc = sc;
933     tr->tr_tag = tag;
934     if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
935 	twe_free_request(tr);
936 	twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
937 	return(NULL);
938     }
939     return(tr);
940 }
941 
942 /********************************************************************************
943  * Permanently discard a command buffer.
944  */
945 void
946 twe_free_request(struct twe_request *tr)
947 {
948     struct twe_softc	*sc = tr->tr_sc;
949 
950     debug_called(4);
951 
952     bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
953     kfree(tr, TWE_MALLOC_CLASS);
954 }
955 
956 /********************************************************************************
957  * Map/unmap (tr)'s command and data in the controller's addressable space.
958  *
959  * These routines ensure that the data which the controller is going to try to
960  * access is actually visible to the controller, in a machine-independant
961  * fashion.  Due to a hardware limitation, I/O buffers must be 512-byte aligned
962  * and we take care of that here as well.
963  */
964 static void
965 twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
966 {
967     int i;
968 
969     for (i = 0; i < nsegments; i++) {
970 	sgl[i].address = segs[i].ds_addr;
971 	sgl[i].length = segs[i].ds_len;
972     }
973     for (; i < max_sgl; i++) {				/* XXX necessary? */
974 	sgl[i].address = 0;
975 	sgl[i].length = 0;
976     }
977 }
978 
979 static void
980 twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
981 {
982     struct twe_request	*tr = (struct twe_request *)arg;
983     struct twe_softc	*sc = tr->tr_sc;
984     TWE_Command		*cmd = TWE_FIND_COMMAND(tr);
985 
986     debug_called(4);
987 
988     if (tr->tr_flags & TWE_CMD_MAPPED)
989 	panic("already mapped command");
990 
991     tr->tr_flags |= TWE_CMD_MAPPED;
992 
993     if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
994 	sc->twe_state &= ~TWE_STATE_FRZN;
995     /* save the base of the first segment in the command (only meaningful if there is a single segment) */
996     tr->tr_dataphys = segs[0].ds_addr;
997 
998     /* correct command size for s/g list size */
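    /*
     * Each S/G entry is an address/length pair (two 32-bit words), and the
     * command's size field is expressed in 32-bit words, hence two per segment.
     */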
999     cmd->generic.size += 2 * nsegments;
1000 
1001     /*
1002      * Parameter and I/O commands keep the scatter/gather list in different
1003      * places within the command packet, so we need to determine which sort
1004      * of command this actually is before we can populate it correctly.
1005      */
1006     switch(cmd->generic.opcode) {
1007     case TWE_OP_GET_PARAM:
1008     case TWE_OP_SET_PARAM:
1009 	cmd->generic.sgl_offset = 2;
1010 	twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1011 	break;
1012     case TWE_OP_READ:
1013     case TWE_OP_WRITE:
1014 	cmd->generic.sgl_offset = 3;
1015 	twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1016 	break;
1017     case TWE_OP_ATA_PASSTHROUGH:
1018 	cmd->generic.sgl_offset = 5;
1019 	twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1020 	break;
1021     default:
1022 	/*
1023 	 * Fall back to what the Linux driver does.
1024 	 * Do this because the API may send an opcode
1025 	 * the driver knows nothing about, and this will
1026 	 * at least stop PCIABRTs from hosing us.
1027 	 */
1028 	switch (cmd->generic.sgl_offset) {
1029 	case 2:
1030 	    twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1031 	    break;
1032 	case 3:
1033 	    twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
1034 	    break;
1035 	case 5:
1036 	    twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
1037 	    break;
1038 	}
1039     }
1040 
1041     if (tr->tr_flags & TWE_CMD_DATAIN) {
1042 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1043 	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1044 			    BUS_DMASYNC_PREREAD);
1045 	} else {
1046 	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1047 			    BUS_DMASYNC_PREREAD);
1048 	}
1049     }
1050 
1051     if (tr->tr_flags & TWE_CMD_DATAOUT) {
1052 	/*
1053 	 * If we're using an alignment buffer and we're writing data,
1054 	 * copy the real data out.
1055 	 */
1056 	if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1057 	    bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);
1058 
1059 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1060 	    bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1061 			    BUS_DMASYNC_PREWRITE);
1062 	} else {
1063 	    bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1064 			    BUS_DMASYNC_PREWRITE);
1065 	}
1066     }
1067 
1068     if (twe_start(tr) == EBUSY) {
1069 	tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
1070 	twe_requeue_ready(tr);
1071     }
1072 }
1073 
1074 static void
1075 twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1076 {
1077     struct twe_softc	*sc = (struct twe_softc *)arg;
1078 
1079     debug_called(4);
1080 
1081     /* command can't cross a page boundary */
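    /*
     * (The command area was allocated from a tag limited to a single segment,
     *  so segs[0] covers the entire block of TWE_Q_LENGTH commands.)
     */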
1082     sc->twe_cmdphys = segs[0].ds_addr;
1083 }
1084 
1085 int
1086 twe_map_request(struct twe_request *tr)
1087 {
1088     struct twe_softc	*sc = tr->tr_sc;
1089     int			error = 0;
1090 
1091     debug_called(4);
1092 
1093     twe_lockassert(&sc->twe_io_lock);
1094     if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
1095 	twe_requeue_ready(tr);
1096 	return (EBUSY);
1097     }
1098 
1099     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
1100 
1101     /*
1102      * If the command involves data, map that too.
1103      */
1104     if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
1105 
1106 	/*
1107 	 * Data must be 512-byte aligned; allocate a fixup buffer if it's not.
1108 	 *
1109 	 * DragonFly's malloc only guarantees alignment for requests which
1110 	 * are power-of-2 sized.
1111 	 */
1112 	if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
1113 	    int aligned_size;
1114 
1115 	    tr->tr_realdata = tr->tr_data;	/* save pointer to 'real' data */
1116 	    aligned_size = TWE_ALIGNMENT;
1117 	    while (aligned_size < tr->tr_length)
1118 		aligned_size <<= 1;
1119 	    tr->tr_flags |= TWE_CMD_ALIGNBUF;
1120 	    tr->tr_data = kmalloc(aligned_size, TWE_MALLOC_CLASS, M_INTWAIT);
1121 	    if (tr->tr_data == NULL) {
1122 		twe_printf(sc, "%s: malloc failed\n", __func__);
1123 		tr->tr_data = tr->tr_realdata; /* restore original data pointer */
1124 		return(ENOMEM);
1125 	    }
1126 	}
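	/*
	 * For example, a misaligned 1300-byte buffer would get a 2048-byte
	 * bounce buffer here (512 -> 1024 -> 2048); tr_realdata keeps the
	 * caller's original pointer so it can be restored on unmap.
	 */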
1127 
1128 	/*
1129 	 * Map the data buffer into bus space and build the s/g list.
1130 	 */
1131 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1132 	    error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
1133 			    tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
1134 	} else {
1135 	    error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length,
1136 				    twe_setup_data_dmamap, tr, 0);
1137 	}
1138 	if (error == EINPROGRESS) {
1139 	    tr->tr_flags |= TWE_CMD_IN_PROGRESS;
1140 	    sc->twe_state |= TWE_STATE_FRZN;
1141 	    error = 0;
1142 	}
1143     } else
1144 	if ((error = twe_start(tr)) == EBUSY) {
1145 	    sc->twe_state |= TWE_STATE_CTLR_BUSY;
1146 	    twe_requeue_ready(tr);
1147 	}
1148 
1149     return(error);
1150 }
1151 
1152 void
1153 twe_unmap_request(struct twe_request *tr)
1154 {
1155     struct twe_softc	*sc = tr->tr_sc;
1156 
1157     debug_called(4);
1158 
1159     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
1160 
1161     /*
1162      * If the command involved data, unmap that too.
1163      */
1164     if (tr->tr_data != NULL) {
1165 	if (tr->tr_flags & TWE_CMD_DATAIN) {
1166 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1167 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1168 				BUS_DMASYNC_POSTREAD);
1169 	    } else {
1170 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1171 				BUS_DMASYNC_POSTREAD);
1172 	    }
1173 
1174 	    /* if we're using an alignment buffer and we're reading data, copy the real data in */
1175 	    if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1176 		bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
1177 	}
1178 	if (tr->tr_flags & TWE_CMD_DATAOUT) {
1179 	    if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1180 		bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1181 				BUS_DMASYNC_POSTWRITE);
1182 	    } else {
1183 		bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1184 				BUS_DMASYNC_POSTWRITE);
1185 	    }
1186 	}
1187 
1188 	if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1189 	    bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
1190 	} else {
1191 	    bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap);
1192 	}
1193     }
1194 
1195     /* free alignment buffer if it was used */
1196     if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
1197 	kfree(tr->tr_data, TWE_MALLOC_CLASS);
1198 	tr->tr_data = tr->tr_realdata;		/* restore 'real' data pointer */
1199     }
1200 }
1201 
1202 #ifdef TWE_DEBUG
1203 void twe_report(void);
1204 /********************************************************************************
1205  * Print current controller status, call from DDB.
1206  */
1207 void
1208 twe_report(void)
1209 {
1210     struct twe_softc	*sc;
1211     int			i;
1212 
1213     for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++)
1214 	twe_print_controller(sc);
1215     kprintf("twed: total bio count in %u  out %u\n", twed_bio_in, twed_bio_out);
1216 }
1217 #endif
1218