/*-
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2005 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002, 2004 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.99 2012/08/31 09:42:46 scottl Exp $
 */

/*
 * Driver for the AMI MegaRaid family of controllers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <machine/cpu.h>
#include <sys/rman.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/raid/amr/amrio.h>
#include <dev/raid/amr/amrreg.h>
#include <dev/raid/amr/amrvar.h>
#define AMR_DEFINE_TABLES
#include <dev/raid/amr/amr_tables.h>

SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");

static d_open_t         amr_open;
static d_close_t        amr_close;
static d_ioctl_t        amr_ioctl;

static struct dev_ops amr_ops = {
	{ "amr", 0, 0 },
	.d_open =	amr_open,
	.d_close =	amr_close,
	.d_ioctl =	amr_ioctl,
};

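/* Number of attached adapters, reported through the Linux megadev ioctl emulation. */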
int linux_no_adapter = 0;
/*
 * Initialisation, bus interface.
 */
static void	amr_startup(void *arg);

/*
 * Command wrappers
 */
static int	amr_query_controller(struct amr_softc *sc);
static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
static void	amr_completeio(struct amr_command *ac);
static int	amr_support_ext_cdb(struct amr_softc *sc);

/*
 * Command buffer allocation.
 */
static void	amr_alloccmd_cluster(struct amr_softc *sc);
static void	amr_freecmd_cluster(struct amr_command_cluster *acc);

/*
 * Command processing.
 */
static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
static int	amr_wait_command(struct amr_command *ac);
static int	amr_mapcmd(struct amr_command *ac);
static void	amr_unmapcmd(struct amr_command *ac);
static int	amr_start(struct amr_command *ac);
static void	amr_complete(void *context, ac_qhead_t *head);
static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_abort_load(struct amr_command *ac);

/*
 * Interface-specific shims
 */
static int	amr_quartz_submit_command(struct amr_command *ac);
static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_quartz_poll_command(struct amr_command *ac);
static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);

static int	amr_std_submit_command(struct amr_command *ac);
static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_std_poll_command(struct amr_command *ac);
static void	amr_std_attach_mailbox(struct amr_softc *sc);

#ifdef AMR_BOARD_INIT
static int	amr_quartz_init(struct amr_softc *sc);
static int	amr_std_init(struct amr_softc *sc);
#endif

/*
 * Debugging
 */
static void	amr_describe_controller(struct amr_softc *sc);
#ifdef AMR_DEBUG
#if 0
static void	amr_printcommand(struct amr_command *ac);
#endif
#endif

static void	amr_init_sysctl(struct amr_softc *sc);
static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
		    int32_t flag, struct sysmsg *sm);

static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");

/********************************************************************************
 ********************************************************************************
                                                                      Inline Glue
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 ********************************************************************************
                                                                Public Interfaces
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Initialise the controller and softc.
 */
int
amr_attach(struct amr_softc *sc)
{
    device_t child;

    debug_called(1);

    /*
     * Initialise per-controller queues.
     */
    amr_init_qhead(&sc->amr_freecmds);
    amr_init_qhead(&sc->amr_ready);
    TAILQ_INIT(&sc->amr_cmd_clusters);
    bioq_init(&sc->amr_bioq);

    debug(2, "queue init done");

    /*
     * Configure for this controller type.
     */
    if (AMR_IS_QUARTZ(sc)) {
	sc->amr_submit_command = amr_quartz_submit_command;
	sc->amr_get_work       = amr_quartz_get_work;
	sc->amr_poll_command   = amr_quartz_poll_command;
	sc->amr_poll_command1  = amr_quartz_poll_command1;
    } else {
	sc->amr_submit_command = amr_std_submit_command;
	sc->amr_get_work       = amr_std_get_work;
	sc->amr_poll_command   = amr_std_poll_command;
	amr_std_attach_mailbox(sc);
    }

#ifdef AMR_BOARD_INIT
    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
	return(ENXIO);
#endif

    /*
     * Allocate initial commands.
     */
    amr_alloccmd_cluster(sc);

    /*
     * Quiz controller for features and limits.
     */
    if (amr_query_controller(sc))
	return(ENXIO);

    debug(2, "controller query complete");

    /*
     * preallocate the remaining commands.
     */
    while (sc->amr_nextslot < sc->amr_maxio)
	amr_alloccmd_cluster(sc);

    /*
     * Setup sysctls.
     */
    sysctl_ctx_init(&sc->amr_sysctl_ctx);
    sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx,
	SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, "");
    if (sc->amr_sysctl_tree == NULL) {
	device_printf(sc->amr_dev, "can't add sysctl node\n");
	return (EINVAL);
    }
    amr_init_sysctl(sc);

    /*
     * Attach our 'real' SCSI channels to CAM.
     */
    child = device_add_child(sc->amr_dev, "amrp", -1);
    sc->amr_pass = child;
    if (child != NULL) {
	device_set_softc(child, sc);
	device_set_desc(child, "SCSI Passthrough Bus");
	bus_generic_attach(sc->amr_dev);
    }

    /*
     * Create the control device.
     */
    sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
    sc->amr_dev_t->si_drv1 = sc;
    linux_no_adapter++;
    if (device_get_unit(sc->amr_dev) == 0)
	make_dev_alias(sc->amr_dev_t, "megadev0");

    /*
     * Schedule ourselves to bring the controller up once interrupts are
     * available.
     */
    bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
    sc->amr_ich.ich_func = amr_startup;
    sc->amr_ich.ich_arg = sc;
    sc->amr_ich.ich_desc = "amr";
    if (config_intrhook_establish(&sc->amr_ich) != 0) {
	device_printf(sc->amr_dev, "can't establish configuration hook\n");
	return(ENOMEM);
    }

    /*
     * Print a little information about the controller.
     */
    amr_describe_controller(sc);

    debug(2, "attach complete");
    return(0);
}

/********************************************************************************
 * Locate disk resources and attach children to them.
 */
static void
amr_startup(void *arg)
{
    struct amr_softc	*sc = (struct amr_softc *)arg;
    struct amr_logdrive	*dr;
    int			i, error;

    debug_called(1);

    /* pull ourselves off the intrhook chain */
    if (sc->amr_ich.ich_func)
	config_intrhook_disestablish(&sc->amr_ich);
    sc->amr_ich.ich_func = NULL;

    /* get up-to-date drive information */
    if (amr_query_controller(sc)) {
	device_printf(sc->amr_dev, "can't scan controller for drives\n");
	return;
    }

    /* iterate over available drives */
    for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
	/* are we already attached to this drive? */
	if (dr->al_disk == 0) {
	    /* generate geometry information */
	    if (dr->al_size > 0x200000) {	/* extended translation? */
		dr->al_heads = 255;
		dr->al_sectors = 63;
	    } else {
		dr->al_heads = 64;
		dr->al_sectors = 32;
	    }
	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);

	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
	    if (dr->al_disk == 0)
		device_printf(sc->amr_dev, "device_add_child failed\n");
	    device_set_ivars(dr->al_disk, dr);
	}
    }

    if ((error = bus_generic_attach(sc->amr_dev)) != 0)
	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);

    /* mark controller back up */
    sc->amr_state &= ~AMR_STATE_SHUTDOWN;

    /* interrupts will be enabled before we do anything more */
    sc->amr_state |= AMR_STATE_INTEN;

    return;
}

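/* Register per-controller sysctl variables under hw.<nameunit> (e.g. hw.amr0). */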
static void
amr_init_sysctl(struct amr_softc *sc)
{

    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
	"");
}


/*******************************************************************************
 * Free resources associated with a controller instance
 */
void
amr_free(struct amr_softc *sc)
{
    struct amr_command_cluster	*acc;

    /* detach from CAM */
    if (sc->amr_pass != NULL)
	device_delete_child(sc->amr_dev, sc->amr_pass);

    /* throw away any command buffers */
    while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
	amr_freecmd_cluster(acc);
    }

    /* destroy control device */
    if(sc->amr_dev_t != NULL)
	    destroy_dev(sc->amr_dev_t);
    dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));

#if 0 /* XXX swildner */
    if (mtx_initialized(&sc->amr_hw_lock))
	mtx_destroy(&sc->amr_hw_lock);

    if (mtx_initialized(&sc->amr_list_lock))
	mtx_destroy(&sc->amr_list_lock);
#endif

    if (sc->amr_sysctl_tree != NULL)
	    sysctl_ctx_free(&sc->amr_sysctl_ctx);

    lockuninit(&sc->amr_hw_lock);
    lockuninit(&sc->amr_list_lock);
}

/*******************************************************************************
 * Receive a bio structure from a child device and queue it on a particular
 * disk resource, then poke the disk resource to start as much work as it can.
 */
int
amr_submit_bio(struct amr_softc *sc, struct bio *bio)
{
    debug_called(2);

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    amr_enqueue_bio(sc, bio);
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(0);
}

/********************************************************************************
 * Accept an open operation on the control device.
 */
static int
amr_open(struct dev_open_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state |= AMR_STATE_OPEN;
    return(0);
}

/********************************************************************************
 * Accept the last close on the control device.
 */
static int
amr_close(struct dev_close_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_OPEN;
    return (0);
}

/********************************************************************************
 * Handle controller-specific control operations.
 */
static void
amr_rescan_drives(struct cdev *dev)
{
    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
    int			i, error = 0;

    sc->amr_state |= AMR_STATE_REMAP_LD;
    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "idle controller\n");
	amr_done(sc);
    }

    /* mark ourselves as in-shutdown */
    sc->amr_state |= AMR_STATE_SHUTDOWN;

    /* flush controller */
    device_printf(sc->amr_dev, "flushing cache...");
    kprintf("%s\n", amr_flush(sc) ? "failed" : "done");

    /* delete all our child devices */
    for(i = 0 ; i < AMR_MAXLD; i++) {
	if(sc->amr_drive[i].al_disk != 0) {
	    if((error = device_delete_child(sc->amr_dev,
		sc->amr_drive[i].al_disk)) != 0)
		goto shutdown_out;

	     sc->amr_drive[i].al_disk = 0;
	}
    }

shutdown_out:
    amr_startup(sc);
}

/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transferred to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyway, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.  Force a minimum buffer size of 32k and round
 * sizes between 32k and 64k up to 64k as a workaround.
 */
static unsigned long
amr_ioctl_buffer_length(unsigned long len)
{

    if (len <= 32 * 1024)
	return (32 * 1024);
    if (len <= 64 * 1024)
	return (64 * 1024);
    return (len);
}

int
amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
    struct sysmsg *sm)
{
    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
    struct amr_command		*ac;
    struct amr_mailbox		*mb;
    struct amr_linux_ioctl	ali;
    void			*dp, *temp;
    int				error;
    int				len, ac_flags = 0;
    int				logical_drives_changed = 0;
    u_int32_t			linux_version = 0x02100000;
    u_int8_t			status;
    struct amr_passthrough	*ap;	/* 60 bytes */

    error = 0;
    dp = NULL;
    ac = NULL;
    ap = NULL;

    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
	return (error);
    switch (ali.ui.fcs.opcode) {
    case 0x82:
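	/* Management queries: 'e' returns the emulated Linux driver version, 'm' the adapter count. */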
	switch(ali.ui.fcs.subopcode) {
	case 'e':
	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
		sizeof(linux_version));
	    error = 0;
	    break;

	case 'm':
	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
		sizeof(linux_no_adapter));
	    sm->sm_result.iresult = linux_no_adapter;
	    error = 0;
	    break;

	default:
	    kprintf("Unknown subopcode\n");
	    error = ENOIOCTL;
	    break;
	}
    break;

    case 0x80:
    case 0x81:
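	/* Mailbox pass-through: opcode 0x80 sizes the buffer from inlen/outlen, 0x81 carries an explicit length. */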
	if (ali.ui.fcs.opcode == 0x80)
	    len = max(ali.outlen, ali.inlen);
	else
	    len = ali.ui.fcs.length;

	mb = (void *)&ali.mbox[0];

	if ((ali.mbox[0] == FC_DEL_LOGDRV  && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
	    if (sc->amr_allow_vol_config == 0) {
		error = EPERM;
		break;
	    }
	    logical_drives_changed = 1;
	}

	if (ali.mbox[0] == AMR_CMD_PASS) {
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    ap = &ac->ac_ccb->ccb_pthru;

	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
		sizeof(struct amr_passthrough));
	    if (error)
		break;

	    if (ap->ap_data_transfer_length)
		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
		    M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
		    dp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	    ac->ac_flags = ac_flags;

	    ac->ac_data = dp;
	    ac->ac_length = ap->ap_data_transfer_length;
	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
	    if (error)
		break;

	    if (ali.outlen) {
		error = copyout(dp, temp, ap->ap_data_transfer_length);
	        if (error)
		    break;
	    }
	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
	    if (error)
		break;

	    error = 0;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
	    kprintf("No AMR_CMD_PASS_64\n");
	    error = ENOIOCTL;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
	    kprintf("No AMR_CMD_EXTPASS\n");
	    error = ENOIOCTL;
	    break;
	} else {
	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));

	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
		if (error)
		    break;
	    }

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));

	    ac->ac_length = len;
	    ac->ac_data = dp;
	    ac->ac_flags = ac_flags;

	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
	    if (ali.outlen) {
		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
		if (error)
		    break;
	    }

	    error = 0;
	    if (logical_drives_changed)
		amr_rescan_drives(dev);
	    break;
	}
	break;

    default:
	debug(1, "unknown linux ioctl 0x%lx", cmd);
	kprintf("unknown linux ioctl 0x%lx\n", cmd);
	error = ENOIOCTL;
	break;
    }

    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);
    return(error);
}

static int
amr_ioctl(struct dev_ioctl_args *ap)
{
    cdev_t			dev = ap->a_head.a_dev;
    caddr_t			addr = ap->a_data;
    u_long			cmd = ap->a_cmd;
    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
    union {
	void			*_p;
	struct amr_user_ioctl	*au;
#ifdef AMR_IO_COMMAND32
	struct amr_user_ioctl32	*au32;
#endif
	int			*result;
    } arg;
    struct amr_command		*ac;
    struct amr_mailbox_ioctl	*mbi;
    void			*dp, *au_buffer;
    unsigned long		au_length, real_length;
    unsigned char		*au_cmd;
    int				*au_statusp;
    int				error;
    struct amr_passthrough	*_ap;	/* 60 bytes */
    int				logical_drives_changed = 0;

    debug_called(1);

    arg._p = (void *)addr;

    error = 0;
    dp = NULL;
    ac = NULL;
    _ap = NULL;

    switch(cmd) {

    case AMR_IO_VERSION:
	debug(1, "AMR_IO_VERSION");
	*arg.result = AMR_IO_VERSION_NUMBER;
	return(0);

#ifdef AMR_IO_COMMAND32
    /*
     * Accept ioctls from 32-bit binaries on 64-bit platforms
     * such as amd64. LSI's MEGAMGR utility is
     * the only example known today...	-mi
     */
    case AMR_IO_COMMAND32:
	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
	au_cmd = arg.au32->au_cmd;
	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
	au_length = arg.au32->au_length;
	au_statusp = &arg.au32->au_status;
	break;
#endif

    case AMR_IO_COMMAND:
	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
	au_cmd = arg.au->au_cmd;
	au_buffer = (void *)arg.au->au_buffer;
	au_length = arg.au->au_length;
	au_statusp = &arg.au->au_status;
	break;

    case 0xc0046d00:
    case 0xc06e6d00:	/* Linux emulation */
	{
	    devclass_t			devclass;
	    struct amr_linux_ioctl	ali;
	    int				adapter, error;

	    devclass = devclass_find("amr");
	    if (devclass == NULL)
		return (ENOENT);

	    error = copyin(addr, &ali, sizeof(ali));
	    if (error)
		return (error);
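	    /* Opcode 0x82 always targets adapter 0; otherwise the adapter index is recovered by XORing adapno with ('m' << 8). */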
	    if (ali.ui.fcs.opcode == 0x82)
		adapter = 0;
	    else
		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	    sc = devclass_get_softc(devclass, adapter);
	    if (sc == NULL)
		return (ENOENT);

	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
	}
    default:
	debug(1, "unknown ioctl 0x%lx", cmd);
	return(ENOIOCTL);
    }

    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
	if (sc->amr_allow_vol_config == 0) {
	    error = EPERM;
	    goto out;
	}
	logical_drives_changed = 1;
    }

    /* handle inbound data buffer */
    real_length = amr_ioctl_buffer_length(au_length);
    if (au_length != 0 && au_cmd[0] != 0x06) {
	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
	    error = ENOMEM;
	    goto out;
	}
	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
	    kfree(dp, M_AMR);
	    return (error);
	}
	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
    }

    /* Allocate this now before the mutex gets held */

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    while ((ac = amr_alloccmd(sc)) == NULL)
	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

    /* handle SCSI passthrough command */
    if (au_cmd[0] == AMR_CMD_PASS) {
        int len;

	_ap = &ac->ac_ccb->ccb_pthru;
	bzero(_ap, sizeof(struct amr_passthrough));

	/* copy cdb */
        len = au_cmd[2];
	_ap->ap_cdb_length = len;
	bcopy(au_cmd + 3, _ap->ap_cdb, len);

	/* build passthrough */
	_ap->ap_timeout		= au_cmd[len + 3] & 0x07;
	_ap->ap_ars		= (au_cmd[len + 3] & 0x08) ? 1 : 0;
	_ap->ap_islogical	= (au_cmd[len + 3] & 0x80) ? 1 : 0;
	_ap->ap_logical_drive_no = au_cmd[len + 4];
	_ap->ap_channel		= au_cmd[len + 5];
	_ap->ap_scsi_id 	= au_cmd[len + 6];
	_ap->ap_request_sense_length	= 14;
	_ap->ap_data_transfer_length	= au_length;
	/* XXX what about the request-sense area? does the caller want it? */

	/* build command */
	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	ac->ac_flags = AMR_CMD_CCB;

    } else {
	/* direct command to controller */
	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;

	/* copy pertinent mailbox items */
	mbi->mb_command = au_cmd[0];
	mbi->mb_channel = au_cmd[1];
	mbi->mb_param = au_cmd[2];
	mbi->mb_pad[0] = au_cmd[3];
	mbi->mb_drive = au_cmd[4];
	ac->ac_flags = 0;
    }

    /* build the command */
    ac->ac_data = dp;
    ac->ac_length = real_length;
    ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;

    /* run the command */
    error = amr_wait_command(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (error)
	goto out;

    /* copy out data and set status */
    if (au_length != 0) {
	error = copyout(dp, au_buffer, au_length);
    }
    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
    if (dp != NULL)
	debug(2, "%p status 0x%x", dp, ac->ac_status);
    *au_statusp = ac->ac_status;

out:
    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);

    if (logical_drives_changed)
	amr_rescan_drives(dev);

    return(error);
}

/********************************************************************************
 ********************************************************************************
                                                                 Command Wrappers
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softc *sc)
{
    struct amr_enquiry3	*aex;
    struct amr_prodinfo	*ap;
    struct amr_enquiry	*ae;
    int			ldrv;
    int			status;

    /*
     * Greater than 10 byte cdb support
     */
    sc->support_ext_cdb = amr_support_ext_cdb(sc);

    if(sc->support_ext_cdb) {
	debug(2,"supports extended CDBs.");
    }

    /*
     * Try to issue an ENQUIRY3 command
     */
    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size       = aex->ae_drivesize[ldrv];
	    sc->amr_drive[ldrv].al_state      = aex->ae_drivestate[ldrv];
	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
	    debug(2, "  drive %d: %d state %x properties %x", ldrv, sc->amr_drive[ldrv].al_size,
		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}
	kfree(aex, M_AMR);

	/*
	 * Get product info for channel count.
	 */
	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
	    return(1);
	}
	sc->amr_maxdrives = 40;
	sc->amr_maxchan = ap->ap_nschan;
	sc->amr_maxio = ap->ap_maxio;
	sc->amr_type |= AMR_TYPE_40LD;
	kfree(ap, M_AMR);

	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
	if (ap != NULL)
	    kfree(ap, M_AMR);
	if (!status) {
	    sc->amr_ld_del_supported = 1;
	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
	}
    } else {

	/* failed, try the 8LD ENQUIRY commands */
	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
		return(1);
	    }
	    ae->ae_signature = 0;
	}

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size       = ae->ae_ldrv.al_size[ldrv];
	    sc->amr_drive[ldrv].al_state      = ae->ae_ldrv.al_state[ldrv];
	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}

	sc->amr_maxdrives = 8;
	sc->amr_maxchan = ae->ae_adapter.aa_channels;
	sc->amr_maxio = ae->ae_adapter.aa_maxio;
	kfree(ae, M_AMR);
    }

    /*
     * Mark remaining drives as unused.
     */
    for (; ldrv < AMR_MAXLD; ldrv++)
	sc->amr_drive[ldrv].al_size = 0xffffffff;

    /*
     * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
     * the controller's reported value, and lockups have been seen when we do.
     */
    sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);

    return(0);
}

/********************************************************************************
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
{
    struct amr_command	*ac;
    void		*result;
    u_int8_t		*mbox;
    int			error;

    debug_called(1);

    error = 1;
    result = NULL;

    /* get ourselves a command buffer */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* allocate the response structure */
    if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
	goto out;
    /* set command flags */

    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;

    /* point the command at our data */
    ac->ac_data = result;
    ac->ac_length = bufsize;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = cmd;
    mbox[2] = cmdsub;
    mbox[3] = cmdqual;
    *status = 0;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;
    *status = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if ((error != 0) && (result != NULL)) {
	kfree(result, M_AMR);
	result = NULL;
    }
    return(result);
}

/********************************************************************************
 * Flush the controller's internal cache, return status.
 */
int
amr_flush(struct amr_softc *sc)
{
    struct amr_command	*ac;
    int			error;

    /* get ourselves a command buffer */
    error = 1;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;

    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Detect extended CDB (greater than 10 byte CDB) support.
 * Returns 1 if the controller supports extended CDBs, 0 if it does not.
 */
static int
amr_support_ext_cdb(struct amr_softc *sc)
{
    struct amr_command	*ac;
    u_int8_t		*mbox;
    int			error;

    /* get ourselves a command buffer */
    error = 0;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = 0xA4;
    mbox[2] = 0x16;


    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    if( ac->ac_status == AMR_STATUS_SUCCESS ) {
	    error = 1;
    }

out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Try to find I/O work for the controller from one or more of the work queues.
 *
 * We make the assumption that if the controller is not ready to take a command
 * at some given time, it will generate an interrupt at some later time when
 * it is.
 */
void
amr_startio(struct amr_softc *sc)
{
    struct amr_command	*ac;

    /* spin until something prevents us from doing any work */
    for (;;) {

	/* Don't bother to queue commands if no bounce buffers are available. */
	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
	    break;

	/* try to get a ready command */
	ac = amr_dequeue_ready(sc);

	/* if that failed, build a command from a bio */
	if (ac == NULL)
	    (void)amr_bio_command(sc, &ac);

	/* if that failed, build a command from a ccb */
	if ((ac == NULL) && (sc->amr_cam_command != NULL))
	    sc->amr_cam_command(sc, &ac);

	/* if we don't have anything to do, give up */
	if (ac == NULL)
	    break;

	/* try to give the command to the controller; if this fails save it for later and give up */
	if (amr_start(ac)) {
	    debug(2, "controller busy, command deferred");
	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
	    break;
	}
    }
}

/********************************************************************************
 * Handle completion of an I/O command.
 */
static void
amr_completeio(struct amr_command *ac)
{
    struct amr_softc		*sc = ac->ac_sc;
    static struct timeval	lastfail;
    static int			curfail;
    struct buf			*bp = ac->ac_bio->bio_buf;

    if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;

	if (ppsratecheck(&lastfail, &curfail, 1))
	    device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
/*	amr_printcommand(ac);*/
    }
    amrd_intr(ac->ac_bio);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
    amr_releasecmd(ac);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
}

/********************************************************************************
 ********************************************************************************
                                                               Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Convert a bio off the top of the bio queue into a command.
 */
static int
amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
{
    struct amr_command	*ac;
    struct amrd_softc	*amrd;
    struct bio		*bio;
    struct buf		*bp;
    int			error;
    int			blkcount;
    int			driveno;
    int			cmd;

    ac = NULL;
    error = 0;

    /* get a command */
    if ((ac = amr_alloccmd(sc)) == NULL)
	return (ENOMEM);

    /* get a bio to work on */
    if ((bio = amr_dequeue_bio(sc)) == NULL) {
	amr_releasecmd(ac);
	return (0);
    }

    /* connect the bio to the command */
    bp = bio->bio_buf;
    ac->ac_complete = amr_completeio;
    ac->ac_bio = bio;
    ac->ac_data = bp->b_data;
    ac->ac_length = bp->b_bcount;
    cmd = 0;
    switch (bp->b_cmd) {
    case BUF_CMD_READ:
	ac->ac_flags |= AMR_CMD_DATAIN;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LREAD64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LREAD;
	break;
    case BUF_CMD_WRITE:
	ac->ac_flags |= AMR_CMD_DATAOUT;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LWRITE64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LWRITE;
	break;
    case BUF_CMD_FLUSH:
	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
	cmd = AMR_CMD_FLUSH;
	break;
    default:
	panic("Invalid bio command");
    }
    amrd = (struct amrd_softc *)bio->bio_driver_info;
    driveno = amrd->amrd_drive - sc->amr_drive;
    blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;

    ac->ac_mailbox.mb_command = cmd;
    if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
	ac->ac_mailbox.mb_blkcount = blkcount;
	ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
	if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
	    device_printf(sc->amr_dev,
			  "I/O beyond end of unit (%lld,%d > %lu)\n",
			  (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
			  (u_long)sc->amr_drive[driveno].al_size);
	}
    }
    ac->ac_mailbox.mb_drive = driveno;
    if (sc->amr_state & AMR_STATE_REMAP_LD)
	ac->ac_mailbox.mb_drive |= 0x80;

    /* we fill in the s/g related data when the command is mapped */


    *acp = ac;
    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and sleep until it completes
 * or fails.  Interrupts must be enabled, returns nonzero on error.
 */
static int
amr_wait_command(struct amr_command *ac)
{
    int			error = 0;
    struct amr_softc	*sc = ac->ac_sc;

    debug_called(1);

    ac->ac_complete = NULL;
    ac->ac_flags |= AMR_CMD_SLEEP;
    if ((error = amr_start(ac)) != 0) {
	return(error);
    }

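    /* Sleep until completion processing clears AMR_CMD_BUSY and wakes us. */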
    while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
	error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0);
    }

    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_std_poll_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    int			error, count;

    debug_called(2);

    ac->ac_complete = NULL;
    if ((error = amr_start(ac)) != 0)
	return(error);

    count = 0;
    do {
	/*
	 * Poll for completion, although the interrupt handler may beat us to it.
	 * Note that the timeout here is somewhat arbitrary.
	 */
	amr_done(sc);
	DELAY(1000);
    } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
    if (!(ac->ac_flags & AMR_CMD_BUSY)) {
	error = 0;
    } else {
	/* XXX the slot is now marked permanently busy */
	error = EIO;
	device_printf(sc->amr_dev, "polled command timeout\n");
    }
    return(error);
}

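/*
 * DMA map load callback for polled commands: build the s/g list, patch the
 * mailbox to point at it, then issue the command synchronously.
 */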
static void
amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
	ac->ac_status = AMR_STATUS_ABORTED;
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
        ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
        (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    sc->amr_poll_command1(sc, ac);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_quartz_poll_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    int			error;

    debug_called(2);

    error = 0;

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    /* now we have a slot, we can map the command (unmapped in amr_complete) */
    if (ac->ac_data != NULL && ac->ac_length != 0) {
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
	    error = 1;
	}
    } else {
	error = amr_quartz_poll_command1(sc, ac);
    }

    return (error);
}

static int
amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
{
    int count, error;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
	count=0;
	while (sc->amr_busyslots) {
	    lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
	    if(count++>10) {
		break;
	    }
	}

	if(sc->amr_busyslots) {
	    device_printf(sc->amr_dev, "adapter is busy\n");
	    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	    if (ac->ac_data != NULL) {
		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	    }
	    ac->ac_status=0;
	    return(1);
	}
    }

    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);

    /* clear the poll/ack fields in the mailbox */
    sc->amr_mailbox->mb_ident = 0xFE;
    sc->amr_mailbox->mb_nstatus = 0xFF;
    sc->amr_mailbox->mb_status = 0xFF;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;
    sc->amr_mailbox->mb_busy = 1;

    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);

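    /* busy-wait for the firmware to post new status and our command status */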
    while(sc->amr_mailbox->mb_nstatus == 0xFF)
	DELAY(1);
    while(sc->amr_mailbox->mb_status == 0xFF)
	DELAY(1);
    ac->ac_status=sc->amr_mailbox->mb_status;
    error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
    while(sc->amr_mailbox->mb_poll != 0x77)
	DELAY(1);
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0x77;

    /* acknowledge that we have the commands */
    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
    while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
	DELAY(1);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);

    /* unmap the command's data buffer */
    if (ac->ac_flags & AMR_CMD_DATAIN) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
    }
    if (ac->ac_flags & AMR_CMD_DATAOUT) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
    }
    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);

    return(error);
}

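/* Release the busy-slot reservation taken in amr_start(). */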
static __inline int
amr_freeslot(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    int			slot;

    debug_called(3);

    slot = ac->ac_slot;
    if (sc->amr_busycmd[slot] == NULL)
	panic("amr: slot %d not busy?", slot);

    sc->amr_busycmd[slot] = NULL;
    atomic_subtract_int(&sc->amr_busyslots, 1);

    return (0);
}

/********************************************************************************
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct amr_command	*ac = (struct amr_command *)arg;
    struct amr_sgentry	*sg;
    struct amr_sg64entry *sg64;
    int flags, i;

    debug_called(3);

    /* get base address of s/g table */
    sg = ac->ac_sg.sg32;
    sg64 = ac->ac_sg.sg64;

    if (AC_IS_SG64(ac)) {
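	/* 64-bit commands always use the s/g list; the 32-bit mailbox address stays 0xffffffff */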
	ac->ac_nsegments = nsegments;
	ac->ac_mb_physaddr = 0xffffffff;
	for (i = 0; i < nsegments; i++, sg64++) {
	    sg64->sg_addr = segs[i].ds_addr;
	    sg64->sg_count = segs[i].ds_len;
	}
    } else {
	/* decide whether we need to populate the s/g table */
	if (nsegments < 2) {
	    ac->ac_nsegments = 0;
	    ac->ac_mb_physaddr = segs[0].ds_addr;
	} else {
            ac->ac_nsegments = nsegments;
	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
	    for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = segs[i].ds_addr;
		sg->sg_count = segs[i].ds_len;
	    }
	}
    }

    flags = 0;
    if (ac->ac_flags & AMR_CMD_DATAIN)
	flags |= BUS_DMASYNC_PREREAD;
    if (ac->ac_flags & AMR_CMD_DATAOUT)
	flags |= BUS_DMASYNC_PREWRITE;
    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
    ac->ac_flags |= AMR_CMD_MAPPED;
}

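/*
 * DMA map load callback for ordinary data commands: build the s/g list,
 * finish the mailbox and hand the command to the controller.
 */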
static void
amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
	amr_abort_load(ac);
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
        ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
        (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

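/*
 * DMA map load callback for SCSI pass-through (CCB) commands: the mailbox
 * points at the CCB and the s/g list describes the data buffer.
 */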
static void
amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
    struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
	amr_abort_load(ac);
	return;
    }

    /* Set up the mailbox portion of the command to point at the ccb */
    ac->ac_mailbox.mb_nsgelem = 0;
    ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;

    amr_setup_sg(arg, segs, nsegs, err);

    switch (ac->ac_mailbox.mb_command) {
    case AMR_CMD_EXTPASS:
	aep->ap_no_sg_elements = ac->ac_nsegments;
	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
        break;
    case AMR_CMD_PASS:
	ap->ap_no_sg_elements = ac->ac_nsegments;
	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
	break;
    default:
	panic("Unknown ccb command");
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

static int
amr_mapcmd(struct amr_command *ac)
{
    bus_dmamap_callback_t *cb;
    struct amr_softc	*sc = ac->ac_sc;

    debug_called(3);

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    if (ac->ac_flags & AMR_CMD_CCB)
	cb = amr_setup_ccb;
    else
	cb = amr_setup_data;

    /* if the command involves data at all, and hasn't been mapped */
    if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
	/* map the data buffers into bus space and build the s/g list */
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	     ac->ac_length, cb, ac, 0) == EINPROGRESS) {
	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
	}
   } else {
	if (sc->amr_submit_command(ac) == EBUSY) {
	    amr_freeslot(ac);
	    amr_requeue_ready(ac);
	}
   }

    return (0);
}

static void
amr_unmapcmd(struct amr_command *ac)
{
    int			flag;

    debug_called(3);

    /* if the command involved data at all and was mapped */
    if (ac->ac_flags & AMR_CMD_MAPPED) {

	if (ac->ac_data != NULL) {

	    flag = 0;
	    if (ac->ac_flags & AMR_CMD_DATAIN)
		flag |= BUS_DMASYNC_POSTREAD;
	    if (ac->ac_flags & AMR_CMD_DATAOUT)
		flag |= BUS_DMASYNC_POSTWRITE;

	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
    }
}

1718 static void
1719 amr_abort_load(struct amr_command *ac)
1720 {
1721     ac_qhead_t head;
1722     struct amr_softc *sc = ac->ac_sc;
1723 
1724     KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
1725 
1726     ac->ac_status = AMR_STATUS_ABORTED;
1727     amr_init_qhead(&head);
1728     amr_enqueue_completed(ac, &head);
1729 
1730     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1731     amr_complete(sc, &head);
1732     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1733 }
1734 
1735 /********************************************************************************
1736  * Take a command and give it to the controller; returns 0 if successful, or
1737  * EBUSY if the command should be retried later.
1738  */
1739 static int
1740 amr_start(struct amr_command *ac)
1741 {
1742     struct amr_softc *sc;
1743     int error = 0;
1744     int slot;
1745 
1746     debug_called(3);
1747 
1748     /* mark command as busy so that polling consumer can tell */
1749     sc = ac->ac_sc;
1750     ac->ac_flags |= AMR_CMD_BUSY;
1751 
1752     /* get a command slot (freed in amr_done) */
1753     slot = ac->ac_slot;
1754     if (sc->amr_busycmd[slot] != NULL)
1755 	panic("amr: slot %d busy?", slot);
1756     sc->amr_busycmd[slot] = ac;
1757     atomic_add_int(&sc->amr_busyslots, 1);
1758 
1759     /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1760     if ((error = amr_mapcmd(ac)) == ENOMEM) {
1761 	/*
1762 	 * Memory resources are short, so free the slot and let this command be
1763 	 * retried later.
1764 	 */
1765 	amr_freeslot(ac);
1766     }
1767 
1768     return (error);
1769 }
1770 
1771 /********************************************************************************
1772  * Extract one or more completed commands from the controller (sc)
1773  *
1774  * Returns nonzero if any commands on the work queue were marked as completed.
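 *
 * This is the interrupt-side collector: completions are gathered into a
 * local queue via the controller-specific amr_get_work() hook and then
 * handed to amr_complete() for processing.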
1775  */
1776 
1777 int
1778 amr_done(struct amr_softc *sc)
1779 {
1780     ac_qhead_t		head;
1781     struct amr_command	*ac;
1782     struct amr_mailbox	mbox;
1783     int			i, idx, result;
1784 
1785     debug_called(3);
1786 
1787     /* See if there's anything for us to do */
1788     result = 0;
1789     amr_init_qhead(&head);
1790 
1791     /* loop collecting completed commands */
1792     for (;;) {
1793 	/* poll for a completed command's identifier and status */
1794 	if (sc->amr_get_work(sc, &mbox)) {
1795 	    result = 1;
1796 
1797 	    /* iterate over completed commands in this result */
1798 	    for (i = 0; i < mbox.mb_nstatus; i++) {
1799 		/* get pointer to busy command */
1800 		idx = mbox.mb_completed[i] - 1;
1801 		ac = sc->amr_busycmd[idx];
1802 
1803 		/* really a busy command? */
1804 		if (ac != NULL) {
1805 
1806 		    /* pull the command from the busy index */
1807 		    amr_freeslot(ac);
1808 
1809 		    /* save status for later use */
1810 		    ac->ac_status = mbox.mb_status;
1811 		    amr_enqueue_completed(ac, &head);
1812 		    debug(3, "completed command with status %x", mbox.mb_status);
1813 		} else {
1814 		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1815 		}
1816 	    }
1817 	} else
1818 	    break;	/* no work */
1819     }
1820 
1821     /* handle completion and timeouts */
1822     amr_complete(sc, &head);
1823 
1824     return(result);
1825 }
1826 
1827 /********************************************************************************
1828  * Do completion processing on done commands from (sc)
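 *
 * Each completed command is unmapped and then either handed to its
 * ac_complete callback or, if a thread is sleeping on it (AMR_CMD_SLEEP),
 * woken up.  Once the queue is drained, the frozen-queue state is cleared
 * and amr_startio() is kicked to push out any deferred work.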
1829  */
1830 
1831 static void
1832 amr_complete(void *context, ac_qhead_t *head)
1833 {
1834     struct amr_softc	*sc = (struct amr_softc *)context;
1835     struct amr_command	*ac;
1836 
1837     debug_called(3);
1838 
1839     /* pull completed commands off the queue */
1840     for (;;) {
1841 	ac = amr_dequeue_completed(sc, head);
1842 	if (ac == NULL)
1843 	    break;
1844 
1845 	/* unmap the command's data buffer */
1846 	amr_unmapcmd(ac);
1847 
1848 	/*
1849 	 * Is there a completion handler?
1850 	 */
1851 	if (ac->ac_complete != NULL) {
1852 	    /* unbusy the command */
1853 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1854 	    ac->ac_complete(ac);
1855 
1856 	    /*
1857 	     * Is someone sleeping on this one?
1858 	     */
1859 	} else {
1860 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1861 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1862 	    if (ac->ac_flags & AMR_CMD_SLEEP) {
1863 		/* wake anyone sleeping on this command */
1864 		wakeup(ac);
1865 	    }
1866 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
1867 	}
1868 
1869 	if (!sc->amr_busyslots) {
1870 	    wakeup(sc);
1871 	}
1872     }
1873 
1874     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1875     sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1876     amr_startio(sc);
1877     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1878 }
1879 
1880 /********************************************************************************
1881  ********************************************************************************
1882                                                         Command Buffer Management
1883  ********************************************************************************
1884  ********************************************************************************/
1885 
1886 /********************************************************************************
1887  * Get a new command buffer.
1888  *
1889  * This may return NULL in low-memory cases.
1890  *
1891  * If possible, we recycle a command buffer that's been used before.
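 *
 * An illustrative (not normative) usage sketch, modelled on the pattern
 * amr_dump_blocks() uses later in this file:
 *
 *	ac = amr_alloccmd(sc);			-- may return NULL
 *	ac->ac_data = buf;  ac->ac_length = len;
 *	ac->ac_mailbox.mb_command = ...;	-- fill in the request
 *	... submit via amr_start() or sc->amr_poll_command() ...
 *	amr_releasecmd(ac);			-- once the command completes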
1892  */
1893 struct amr_command *
1894 amr_alloccmd(struct amr_softc *sc)
1895 {
1896     struct amr_command	*ac;
1897 
1898     debug_called(3);
1899 
1900     ac = amr_dequeue_free(sc);
1901     if (ac == NULL) {
1902 	sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1903 	return(NULL);
1904     }
1905 
1906     /* clear out significant fields */
1907     ac->ac_status = 0;
1908     bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1909     ac->ac_flags = 0;
1910     ac->ac_bio = NULL;
1911     ac->ac_data = NULL;
1912     ac->ac_complete = NULL;
1913     ac->ac_retries = 0;
1914     ac->ac_tag = NULL;
1915     ac->ac_datamap = NULL;
1916     return(ac);
1917 }
1918 
1919 /********************************************************************************
1920  * Release a command buffer for recycling.
1921  */
1922 void
1923 amr_releasecmd(struct amr_command *ac)
1924 {
1925     debug_called(3);
1926 
1927     amr_enqueue_free(ac);
1928 }
1929 
1930 /********************************************************************************
1931  * Allocate a new command cluster and initialise it.
1932  */
1933 static void
1934 amr_alloccmd_cluster(struct amr_softc *sc)
1935 {
1936     struct amr_command_cluster	*acc;
1937     struct amr_command		*ac;
1938     int				i, nextslot;
1939 
1940     /*
1941      * If we haven't discovered the controller's real command limit yet, allow
1942      * ourselves a couple of commands so that we can probe for it.
1943      */
1944     if (sc->amr_maxio == 0)
1945 	sc->amr_maxio = 2;
1946 
1947     if (sc->amr_nextslot > sc->amr_maxio)
1948 	return;
1949     acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1950     if (acc != NULL) {
1951 	nextslot = sc->amr_nextslot;
1952 	lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1953 	TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1954 	lockmgr(&sc->amr_list_lock, LK_RELEASE);
1955 	for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1956 	    ac = &acc->acc_command[i];
1957 	    ac->ac_sc = sc;
1958 	    ac->ac_slot = nextslot;
1959 
1960 	    /*
1961 	     * The SG table for each slot is a fixed size and is assumed to
1962 	     * hold 64-bit s/g objects when the driver is configured to do
1963 	     * 64-bit DMA.  32-bit DMA commands still use the same table, but
1964 	     * cast down to 32-bit objects.
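	     *
	     * Illustrative example of the arithmetic: with 64-bit s/g entries,
	     * slot N's table begins at
	     *   amr_sgbusaddr + N * AMR_NSEG * sizeof(struct amr_sg64entry),
	     * which is exactly the ac_sgbusaddr computed below.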
1965 	     */
1966 	    if (AMR_IS_SG64(sc)) {
1967 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1968 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1969 	        ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1970 	    } else {
1971 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1972 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1973 	        ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1974 	    }
1975 
1976 	    ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1977 	    ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
1978 		(ac->ac_slot * sizeof(union amr_ccb));
1979 
1980 	    if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
1981 		break;
1982 	    if (AMR_IS_SG64(sc) &&
1983 		(bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
1984 		break;
1985 	    amr_releasecmd(ac);
1986 	    if (++nextslot > sc->amr_maxio)
1987 		break;
1988 	}
1989 	sc->amr_nextslot = nextslot;
1990     }
1991 }
1992 
1993 /********************************************************************************
1994  * Free a command cluster
1995  */
1996 static void
1997 amr_freecmd_cluster(struct amr_command_cluster *acc)
1998 {
1999     struct amr_softc	*sc = acc->acc_command[0].ac_sc;
2000     int			i;
2001 
2002     for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2003 	if (acc->acc_command[i].ac_sc == NULL)
2004 	    break;
2005 	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2006 	if (AMR_IS_SG64(sc))
2007 		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2008     }
2009     kfree(acc, M_AMR);
2010 }
2011 
2012 /********************************************************************************
2013  ********************************************************************************
2014                                                          Interface-specific Shims
2015  ********************************************************************************
2016  ********************************************************************************/
2017 
2018 /********************************************************************************
2019  * Tell the controller that the mailbox contains a valid command
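 *
 * Sketch of the Quartz submission handshake implemented below: wait briefly
 * for the mailbox busy flag to clear, copy the command portion of the
 * mailbox into the controller's mailbox, set the busy flag, and write the
 * mailbox physical address with AMR_QIDB_SUBMIT to the inbound doorbell.
 * If the mailbox stays busy, EBUSY is returned so the caller can retry.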
2020  */
2021 static int
2022 amr_quartz_submit_command(struct amr_command *ac)
2023 {
2024     struct amr_softc	*sc = ac->ac_sc;
2025     static struct timeval lastfail;
2026     static int		curfail;
2027     int			i = 0;
2028 
2029     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2030     while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2031         DELAY(1);
2032 	/* This is a no-op read that flushes pending mailbox updates */
2033 	AMR_QGET_ODB(sc);
2034     }
2035     if (sc->amr_mailbox->mb_busy) {
2036 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2037 	if (ac->ac_retries++ > 1000) {
2038 	    if (ppsratecheck(&lastfail, &curfail, 1))
2039 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2040 			      "Controller is likely dead\n", ac);
2041 	    ac->ac_retries = 0;
2042 	}
2043 	return (EBUSY);
2044     }
2045 
2046     /*
2047      * Save the slot number so that we can locate this command when complete.
2048      * Note that ident = 0 seems to be special, so we don't use it.
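     * amr_done() reverses this mapping by using mb_completed[i] - 1 as the
     * slot index.  The bcopy below copies only the command portion of the
     * mailbox (presumably the first 14 bytes, through mb_nsgelem); the
     * busy/poll/ack bytes that follow are set explicitly afterwards.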
2049      */
2050     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into the mailbox */
2051     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2052     sc->amr_mailbox->mb_busy = 1;
2053     sc->amr_mailbox->mb_poll = 0;
2054     sc->amr_mailbox->mb_ack  = 0;
2055     sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2056     sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2057 
2058     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2059     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2060     return(0);
2061 }
2062 
2063 static int
2064 amr_std_submit_command(struct amr_command *ac)
2065 {
2066     struct amr_softc	*sc = ac->ac_sc;
2067     static struct timeval lastfail;
2068     static int		curfail;
2069 
2070     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2071     if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2072 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2073 	if (ac->ac_retries++ > 1000) {
2074 	    if (ppsratecheck(&lastfail, &curfail, 1))
2075 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2076 			      "Controller is likely dead\n", ac);
2077 	    ac->ac_retries = 0;
2078 	}
2079 	return (EBUSY);
2080     }
2081 
2082     /*
2083      * Save the slot number so that we can locate this command when complete.
2084      * Note that ident = 0 seems to be special, so we don't use it.
2085      */
2086     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into the mailbox */
2087     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2088     sc->amr_mailbox->mb_busy = 1;
2089     sc->amr_mailbox->mb_poll = 0;
2090     sc->amr_mailbox->mb_ack  = 0;
2091 
2092     AMR_SPOST_COMMAND(sc);
2093     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2094     return(0);
2095 }
2096 
2097 /********************************************************************************
2098  * Claim any work that the controller has completed; acknowledge completion,
2099  * save details of the completion in (mbsave)
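 *
 * Outline of the Quartz handshake below: when the outbound doorbell reads
 * AMR_QODB_READY, acknowledge it, spin until the firmware has filled in
 * mb_nstatus and each mb_completed[] entry (0xff apparently serving as the
 * not-yet-written sentinel), copy them into (mbsave), and finally write
 * AMR_QIDB_ACK to tell the controller the completions have been consumed.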
2100  */
2101 static int
2102 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2103 {
2104     int		worked, i;
2105     u_int32_t	outd;
2106     u_int8_t	nstatus;
2107     u_int8_t	completed[46];
2108 
2109     debug_called(3);
2110 
2111     worked = 0;
2112 
2113     /* work waiting for us? */
2114     if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2115 
2116 	/* acknowledge interrupt */
2117 	AMR_QPUT_ODB(sc, AMR_QODB_READY);
2118 
2119 	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2120 	    DELAY(1);
2121 	sc->amr_mailbox->mb_nstatus = 0xff;
2122 
2123 	/* wait until the firmware has written out all the completions */
2124 	for (i = 0; i < nstatus; i++) {
2125 	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2126 		DELAY(1);
2127 	    sc->amr_mailbox->mb_completed[i] = 0xff;
2128 	}
2129 
2130 	/* Save information for later processing */
2131 	mbsave->mb_nstatus = nstatus;
2132 	mbsave->mb_status = sc->amr_mailbox->mb_status;
2133 	sc->amr_mailbox->mb_status = 0xff;
2134 
2135 	for (i = 0; i < nstatus; i++)
2136 	    mbsave->mb_completed[i] = completed[i];
2137 
2138 	/* acknowledge that we have the commands */
2139 	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2140 
2141 #if 0
2142 #ifndef AMR_QUARTZ_GOFASTER
2143 	/*
2144 	 * This waits for the controller to notice that we've taken the
2145 	 * command from it.  It's very inefficient, and we shouldn't do it,
2146 	 * but if we remove this code, we stop completing commands under
2147 	 * load.
2148 	 *
2149 	 * Peter J says we shouldn't do this.  The documentation says we
2150 	 * should.  Who is right?
2151 	 */
2152 	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2153 	    ;				/* XXX aiee! what if it dies? */
2154 #endif
2155 #endif
2156 
2157 	worked = 1;			/* got some work */
2158     }
2159 
2160     return(worked);
2161 }
2162 
2163 static int
2164 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2165 {
2166     int		worked;
2167     u_int8_t	istat;
2168 
2169     debug_called(3);
2170 
2171     worked = 0;
2172 
2173     /* check for valid interrupt status */
2174     istat = AMR_SGET_ISTAT(sc);
2175     if ((istat & AMR_SINTR_VALID) != 0) {
2176 	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */
2177 
2178 	/* save mailbox, which contains a list of completed commands */
2179 	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2180 
2181 	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
2182 	worked = 1;
2183     }
2184 
2185     return(worked);
2186 }
2187 
2188 /********************************************************************************
2189  * Notify the controller of the mailbox location.
2190  */
2191 static void
2192 amr_std_attach_mailbox(struct amr_softc *sc)
2193 {
2194 
2195     /* program the mailbox physical address */
2196     AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
2197     AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
2198     AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2199     AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2200     AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2201 
2202     /* clear any outstanding interrupt and enable interrupts proper */
2203     AMR_SACK_INTERRUPT(sc);
2204     AMR_SENABLE_INTR(sc);
2205 }
2206 
2207 #ifdef AMR_BOARD_INIT
2208 /********************************************************************************
2209  * Initialise the controller
2210  */
2211 static int
2212 amr_quartz_init(struct amr_softc *sc)
2213 {
2214     int		status, ostatus;
2215 
2216     device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2217 
2218     AMR_QRESET(sc);
2219 
2220     ostatus = 0xff;
2221     while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2222 	if (status != ostatus) {
2223 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2224 	    ostatus = status;
2225 	}
2226 	switch (status) {
2227 	case AMR_QINIT_NOMEM:
2228 	    return(ENOMEM);
2229 
2230 	case AMR_QINIT_SCAN:
2231 	    /* XXX we could print channel/target here */
2232 	    break;
2233 	}
2234     }
2235     return(0);
2236 }
2237 
2238 static int
2239 amr_std_init(struct amr_softc *sc)
2240 {
2241     int		status, ostatus;
2242 
2243     device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2244 
2245     AMR_SRESET(sc);
2246 
2247     ostatus = 0xff;
2248     while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2249 	if (status != ostatus) {
2250 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2251 	    ostatus = status;
2252 	}
2253 	switch (status) {
2254 	case AMR_SINIT_NOMEM:
2255 	    return(ENOMEM);
2256 
2257 	case AMR_SINIT_INPROG:
2258 	    /* XXX we could print channel/target here? */
2259 	    break;
2260 	}
2261     }
2262     return(0);
2263 }
2264 #endif
2265 
2266 /********************************************************************************
2267  ********************************************************************************
2268                                                                         Debugging
2269  ********************************************************************************
2270  ********************************************************************************/
2271 
2272 /********************************************************************************
2273  * Identify the controller and print some information about it.
2274  */
2275 static void
2276 amr_describe_controller(struct amr_softc *sc)
2277 {
2278     struct amr_prodinfo	*ap;
2279     struct amr_enquiry	*ae;
2280     char		*prod;
2281     int			status;
2282 
2283     /*
2284      * Try to get 40LD product info, which tells us what the card is labelled as.
2285      */
2286     if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2287 	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2288 		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
2289 		      ap->ap_memsize);
2290 
2291 	kfree(ap, M_AMR);
2292 	return;
2293     }
2294 
2295     /*
2296      * Try an 8LD extended ENQUIRY to get the controller signature, and use the lookup table.
2297      */
2298     if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2299 	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2300 
2301     } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2302 
2303 	/*
2304 	 * Try to work it out based on the PCI signatures.
2305 	 */
2306 	switch (pci_get_device(sc->amr_dev)) {
2307 	case 0x9010:
2308 	    prod = "Series 428";
2309 	    break;
2310 	case 0x9060:
2311 	    prod = "Series 434";
2312 	    break;
2313 	default:
2314 	    prod = "unknown controller";
2315 	    break;
2316 	}
2317     } else {
2318 	device_printf(sc->amr_dev, "<unsupported controller>\n");
2319 	return;
2320     }
2321 
2322     /*
2323      * HP NetRaid controllers use a special encoding of the firmware and
2324      * BIOS versions.  The AMI firmware appears to store them as plain
2325      * ASCII strings, whereas the HP variant uses a leading uppercase
2326      * letter followed by two binary version numbers.
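     *
     * Illustrative example (hypothetical values): an HP-style aa_firmware[]
     * of { 0x01, 0x04, 'B' } is printed below as "B.04.01", whereas an
     * AMI-style version is printed as a plain string.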
2327      */
2328 
2329     if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2330        ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2331        ae->ae_adapter.aa_firmware[1] <  ' ' &&
2332        ae->ae_adapter.aa_firmware[0] <  ' ' &&
2333        ae->ae_adapter.aa_bios[2] >= 'A'     &&
2334        ae->ae_adapter.aa_bios[2] <= 'Z'     &&
2335        ae->ae_adapter.aa_bios[1] <  ' '     &&
2336        ae->ae_adapter.aa_bios[0] <  ' ') {
2337 
2338 	/* this looks like we have an HP NetRaid version of the MegaRaid */
2339 
2340 	if (ae->ae_signature == AMR_SIG_438) {
2341 	    /* the AMI 438 is a NetRaid 3si in HP-land */
2342 	    prod = "HP NetRaid 3si";
2343 	}
2344 
2345 	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2346 		      prod, ae->ae_adapter.aa_firmware[2],
2347 		      ae->ae_adapter.aa_firmware[1],
2348 		      ae->ae_adapter.aa_firmware[0],
2349 		      ae->ae_adapter.aa_bios[2],
2350 		      ae->ae_adapter.aa_bios[1],
2351 		      ae->ae_adapter.aa_bios[0],
2352 		      ae->ae_adapter.aa_memorysize);
2353     } else {
2354 	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2355 		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2356 		      ae->ae_adapter.aa_memorysize);
2357     }
2358     kfree(ae, M_AMR);
2359 }
2360 
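/********************************************************************************
 * Write (blks) blocks from (data) to logical drive (unit) starting at (lba),
 * polling for completion.  Interrupts may not be available on this path
 * (presumably the crash-dump path), so the command is issued with
 * amr_poll_command().
 */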
2361 int
2362 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2363 {
2364     struct amr_command	*ac;
2365     int			error = EIO;
2366 
2367     debug_called(1);
2368 
2369     sc->amr_state |= AMR_STATE_INTEN;
2370 
2371     /* get ourselves a command buffer */
2372     if ((ac = amr_alloccmd(sc)) == NULL)
2373 	goto out;
2374     /* set command flags */
2375     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2376 
2377     /* point the command at our data */
2378     ac->ac_data = data;
2379     ac->ac_length = blks * AMR_BLKSIZE;
2380 
2381     /* build the command proper */
2382     ac->ac_mailbox.mb_command 	= AMR_CMD_LWRITE;
2383     ac->ac_mailbox.mb_blkcount	= blks;
2384     ac->ac_mailbox.mb_lba	= lba;
2385     ac->ac_mailbox.mb_drive	= unit;
2386 
2387     /* can't assume that interrupts are going to work here, so play it safe */
2388     if (sc->amr_poll_command(ac))
2389 	goto out;
2390     error = ac->ac_status;
2391 
2392  out:
2393     if (ac != NULL)
2394 	amr_releasecmd(ac);
2395 
2396     sc->amr_state &= ~AMR_STATE_INTEN;
2397     return (error);
2398 }
2399 
2400 
2401 
2402 #ifdef AMR_DEBUG
2403 /********************************************************************************
2404  * Print the command (ac) in human-readable format
2405  */
2406 #if 0
2407 static void
2408 amr_printcommand(struct amr_command *ac)
2409 {
2410     struct amr_softc	*sc = ac->ac_sc;
2411     struct amr_sgentry	*sg;
2412     int			i;
2413 
2414     device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
2415 		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2416     device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
2417 		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2418     device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2419     device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
2420 		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2421     device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2422 
2423     /* get base address of s/g table */
2424     sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2425     for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2426 	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
2427 }
2428 #endif
2429 #endif
2430