xref: /dragonfly/sys/dev/raid/amr/amr.c (revision a32bc35d)
1 /*-
2  * Copyright (c) 1999,2000 Michael Smith
3  * Copyright (c) 2000 BSDi
4  * Copyright (c) 2005 Scott Long
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002 Eric Moore
30  * Copyright (c) 2002, 2004 LSI Logic Corporation
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. The party using or redistributing the source code and binary forms
42  *    agrees to the disclaimer below and the terms and conditions set forth
43  *    herein.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55  * SUCH DAMAGE.
56  *
57  * $FreeBSD: src/sys/dev/amr/amr.c,v 1.97 2012/04/20 20:27:31 jhb Exp $
58  */
59 
60 /*
61  * Driver for the AMI MegaRaid family of controllers.
62  */
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/proc.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysmsg.h>
71 
72 #include <sys/bio.h>
73 #include <sys/bus.h>
74 #include <sys/conf.h>
75 #include <sys/stat.h>
76 
77 #include <machine/cpu.h>
78 #include <sys/rman.h>
79 
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
82 
83 #include <dev/raid/amr/amrio.h>
84 #include <dev/raid/amr/amrreg.h>
85 #include <dev/raid/amr/amrvar.h>
86 #define AMR_DEFINE_TABLES
87 #include <dev/raid/amr/amr_tables.h>
88 
89 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
90 
91 static d_open_t         amr_open;
92 static d_close_t        amr_close;
93 static d_ioctl_t        amr_ioctl;
94 
95 static struct dev_ops amr_ops = {
96 	{ "amr", 0, 0 },
97 	.d_open =	amr_open,
98 	.d_close =	amr_close,
99 	.d_ioctl =	amr_ioctl,
100 };
101 
102 int linux_no_adapter = 0;
103 /*
104  * Initialisation, bus interface.
105  */
106 static void	amr_startup(void *arg);
107 
108 /*
109  * Command wrappers
110  */
111 static int	amr_query_controller(struct amr_softc *sc);
112 static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
113 			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
114 static void	amr_completeio(struct amr_command *ac);
115 static int	amr_support_ext_cdb(struct amr_softc *sc);
116 
117 /*
118  * Command buffer allocation.
119  */
120 static void	amr_alloccmd_cluster(struct amr_softc *sc);
121 static void	amr_freecmd_cluster(struct amr_command_cluster *acc);
122 
123 /*
124  * Command processing.
125  */
126 static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
127 static int	amr_wait_command(struct amr_command *ac);
128 static int	amr_mapcmd(struct amr_command *ac);
129 static void	amr_unmapcmd(struct amr_command *ac);
130 static int	amr_start(struct amr_command *ac);
131 static void	amr_complete(void *context, ac_qhead_t *head);
132 static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
133 static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
134 static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
135 static void	amr_abort_load(struct amr_command *ac);
136 
137 #if 0
138 /*
139  * Status monitoring
140  */
141 static void	amr_periodic(void *data);
142 #endif
143 
144 /*
145  * Interface-specific shims
146  */
147 static int	amr_quartz_submit_command(struct amr_command *ac);
148 static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
149 static int	amr_quartz_poll_command(struct amr_command *ac);
150 static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
151 
152 static int	amr_std_submit_command(struct amr_command *ac);
153 static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
154 static int	amr_std_poll_command(struct amr_command *ac);
155 static void	amr_std_attach_mailbox(struct amr_softc *sc);
156 
157 #ifdef AMR_BOARD_INIT
158 static int	amr_quartz_init(struct amr_softc *sc);
159 static int	amr_std_init(struct amr_softc *sc);
160 #endif
161 
162 /*
163  * Debugging
164  */
165 static void	amr_describe_controller(struct amr_softc *sc);
166 #ifdef AMR_DEBUG
167 #if 0
168 static void	amr_printcommand(struct amr_command *ac);
169 #endif
170 #endif
171 
172 static void	amr_init_sysctl(struct amr_softc *sc);
173 static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
174 		    int32_t flag, struct sysmsg *sm);
175 
176 static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
177 
178 /********************************************************************************
179  ********************************************************************************
180                                                                       Inline Glue
181  ********************************************************************************
182  ********************************************************************************/
183 
184 /********************************************************************************
185  ********************************************************************************
186                                                                 Public Interfaces
187  ********************************************************************************
188  ********************************************************************************/
189 
190 /********************************************************************************
191  * Initialise the controller and softc.
192  */
193 int
194 amr_attach(struct amr_softc *sc)
195 {
196     device_t child;
197 
198     debug_called(1);
199 
200     /*
201      * Initialise per-controller queues.
202      */
203     amr_init_qhead(&sc->amr_freecmds);
204     amr_init_qhead(&sc->amr_ready);
205     TAILQ_INIT(&sc->amr_cmd_clusters);
206     bioq_init(&sc->amr_bioq);
207 
208     debug(2, "queue init done");
209 
210     /*
211      * Configure for this controller type.
212      */
213     if (AMR_IS_QUARTZ(sc)) {
214 	sc->amr_submit_command = amr_quartz_submit_command;
215 	sc->amr_get_work       = amr_quartz_get_work;
216 	sc->amr_poll_command   = amr_quartz_poll_command;
217 	sc->amr_poll_command1  = amr_quartz_poll_command1;
218     } else {
219 	sc->amr_submit_command = amr_std_submit_command;
220 	sc->amr_get_work       = amr_std_get_work;
221 	sc->amr_poll_command   = amr_std_poll_command;
222 	amr_std_attach_mailbox(sc);
223     }
224 
225 #ifdef AMR_BOARD_INIT
226     if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
227 	return(ENXIO);
228 #endif
229 
230     /*
231      * Allocate initial commands.
232      */
233     amr_alloccmd_cluster(sc);
234 
235     /*
236      * Quiz controller for features and limits.
237      */
238     if (amr_query_controller(sc))
239 	return(ENXIO);
240 
241     debug(2, "controller query complete");
242 
243     /*
244      * Preallocate the remaining commands.
245      */
246     while (sc->amr_nextslot < sc->amr_maxio)
247 	amr_alloccmd_cluster(sc);
248 
249     /*
250      * Set up sysctls.
251      */
252     sysctl_ctx_init(&sc->amr_sysctl_ctx);
253     sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx,
254 	SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
255 	device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, "");
256     if (sc->amr_sysctl_tree == NULL) {
257 	device_printf(sc->amr_dev, "can't add sysctl node\n");
258 	return (EINVAL);
259     }
260     amr_init_sysctl(sc);
261 
262     /*
263      * Attach our 'real' SCSI channels to CAM.
264      */
265     child = device_add_child(sc->amr_dev, "amrp", -1);
266     sc->amr_pass = child;
267     if (child != NULL) {
268 	device_set_softc(child, sc);
269 	device_set_desc(child, "SCSI Passthrough Bus");
270 	bus_generic_attach(sc->amr_dev);
271     }
272 
273     /*
274      * Create the control device.
275      */
276     sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
277 			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
278     sc->amr_dev_t->si_drv1 = sc;
279     linux_no_adapter++;
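    /*
     * LSI's Linux management utilities expect to find /dev/megadev0, so
     * alias the first unit's control node for their benefit.
     */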
280     if (device_get_unit(sc->amr_dev) == 0)
281 	make_dev_alias(sc->amr_dev_t, "megadev0");
282 
283     /*
284      * Schedule ourselves to bring the controller up once interrupts are
285      * available.
286      */
287     bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
288     sc->amr_ich.ich_func = amr_startup;
289     sc->amr_ich.ich_arg = sc;
290     if (config_intrhook_establish(&sc->amr_ich) != 0) {
291 	device_printf(sc->amr_dev, "can't establish configuration hook\n");
292 	return(ENOMEM);
293     }
294 
295     /*
296      * Print a little information about the controller.
297      */
298     amr_describe_controller(sc);
299 
300     debug(2, "attach complete");
301     return(0);
302 }
303 
304 /********************************************************************************
305  * Locate disk resources and attach children to them.
306  */
307 static void
308 amr_startup(void *arg)
309 {
310     struct amr_softc	*sc = (struct amr_softc *)arg;
311     struct amr_logdrive	*dr;
312     int			i, error;
313 
314     debug_called(1);
315     callout_init(&sc->amr_timeout);
316 
317     /* pull ourselves off the intrhook chain */
318     if (sc->amr_ich.ich_func)
319 	config_intrhook_disestablish(&sc->amr_ich);
320     sc->amr_ich.ich_func = NULL;
321 
322     /* get up-to-date drive information */
323     if (amr_query_controller(sc)) {
324 	device_printf(sc->amr_dev, "can't scan controller for drives\n");
325 	return;
326     }
327 
328     /* iterate over available drives */
329     for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
330 	/* are we already attached to this drive? */
331 	if (dr->al_disk == 0) {
332 	    /* generate geometry information */
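	    /*
	     * al_size is in AMR_BLKSIZE (normally 512-byte) blocks, so the
	     * 0x200000-block threshold below is 1GB, the usual cutoff for
	     * switching to extended (255 head / 63 sector) translation.
	     */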
333 	    if (dr->al_size > 0x200000) {	/* extended translation? */
334 		dr->al_heads = 255;
335 		dr->al_sectors = 63;
336 	    } else {
337 		dr->al_heads = 64;
338 		dr->al_sectors = 32;
339 	    }
340 	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
341 
342 	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
343 	    if (dr->al_disk == 0)
344 		device_printf(sc->amr_dev, "device_add_child failed\n");
345 	    device_set_ivars(dr->al_disk, dr);
346 	}
347     }
348 
349     if ((error = bus_generic_attach(sc->amr_dev)) != 0)
350 	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
351 
352     /* mark controller back up */
353     sc->amr_state &= ~AMR_STATE_SHUTDOWN;
354 
355     /* interrupts will be enabled before we do anything more */
356     sc->amr_state |= AMR_STATE_INTEN;
357 
358 #if 0
359     /*
360      * Start the timeout routine.
361      */
362     sc->amr_timeout = timeout(amr_periodic, sc, hz);
363 #endif
364 
365     return;
366 }
367 
368 static void
369 amr_init_sysctl(struct amr_softc *sc)
370 {
371 
372     SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
373 	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
374 	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
375 	"");
376     SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
377 	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
378 	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
379 	"");
380     SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
381 	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
382 	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
383 	"");
384     SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
385 	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
386 	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
387 	"");
388 }
389 
390 
391 /*******************************************************************************
392  * Free resources associated with a controller instance
393  */
394 void
395 amr_free(struct amr_softc *sc)
396 {
397     struct amr_command_cluster	*acc;
398 
399     /* detach from CAM */
400     if (sc->amr_pass != NULL)
401 	device_delete_child(sc->amr_dev, sc->amr_pass);
402 
403     /* cancel status timeout */
404     callout_stop(&sc->amr_timeout);
405 
406     /* throw away any command buffers */
407     while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
408 	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
409 	amr_freecmd_cluster(acc);
410     }
411 
412     /* destroy control device */
413     if(sc->amr_dev_t != NULL)
414 	    destroy_dev(sc->amr_dev_t);
415     dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));
416 
417 #if 0 /* XXX swildner */
418     if (mtx_initialized(&sc->amr_hw_lock))
419 	mtx_destroy(&sc->amr_hw_lock);
420 
421     if (mtx_initialized(&sc->amr_list_lock))
422 	mtx_destroy(&sc->amr_list_lock);
423 #endif
424 
425     if (sc->amr_sysctl_tree != NULL)
426 	    sysctl_ctx_free(&sc->amr_sysctl_ctx);
427 
428     lockuninit(&sc->amr_hw_lock);
429     lockuninit(&sc->amr_list_lock);
430 }
431 
432 /*******************************************************************************
433  * Receive a bio structure from a child device and queue it on a particular
434  * disk resource, then poke the disk resource to start as much work as it can.
435  */
436 int
437 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
438 {
439     debug_called(2);
440 
441     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
442     amr_enqueue_bio(sc, bio);
443     amr_startio(sc);
444     lockmgr(&sc->amr_list_lock, LK_RELEASE);
445     return(0);
446 }
447 
448 /********************************************************************************
449  * Accept an open operation on the control device.
450  */
451 static int
452 amr_open(struct dev_open_args *ap)
453 {
454     cdev_t		dev = ap->a_head.a_dev;
455     int			unit = minor(dev);
456     struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
457 
458     debug_called(1);
459 
460     sc->amr_state |= AMR_STATE_OPEN;
461     return(0);
462 }
463 
464 #ifdef LSI
465 static int
466 amr_del_ld(struct amr_softc *sc, int drv_no, int status)
467 {
468 
469     debug_called(1);
470 
471     sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
472     sc->amr_state &= ~AMR_STATE_LD_DELETE;
473     sc->amr_state |= AMR_STATE_REMAP_LD;
474     debug(1, "State Set");
475 
476     if (!status) {
477 	debug(1, "disk being destroyed %d", drv_no);
478 	if (--amr_disks_registered == 0)
479 	    cdevsw_remove(&amrddisk_cdevsw);
480 	debug(1, "disk destroyed successfully");
481     }
482     return 0;
483 }
484 
485 static int
486 amr_prepare_ld_delete(struct amr_softc *sc)
487 {
488 
489     debug_called(1);
490     if (sc->ld_del_supported == 0)
491 	return(ENOIOCTL);
492 
493     sc->amr_state |= AMR_STATE_QUEUE_FRZN;
494     sc->amr_state |= AMR_STATE_LD_DELETE;
495 
496     /* Allow up to a minute for all outstanding commands to be flushed. */
497     tsleep((void *)&sc->ld_del_supported, PCATCH,"delete_logical_drv",hz * 60 * 1);
498     if ( sc->amr_busyslots )
499 	return(ENOIOCTL);
500 
501     return 0;
502 }
503 #endif
504 
505 /********************************************************************************
506  * Accept the last close on the control device.
507  */
508 static int
509 amr_close(struct dev_close_args *ap)
510 {
511     cdev_t		dev = ap->a_head.a_dev;
512     int			unit = minor(dev);
513     struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
514 
515     debug_called(1);
516 
517     sc->amr_state &= ~AMR_STATE_OPEN;
518     return (0);
519 }
520 
521 /********************************************************************************
522  * Handle controller-specific control operations.
523  */
524 static void
525 amr_rescan_drives(struct cdev *dev)
526 {
527     struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
528     int			i, error = 0;
529 
530     sc->amr_state |= AMR_STATE_REMAP_LD;
531     while (sc->amr_busyslots) {
532 	device_printf(sc->amr_dev, "idle controller\n");
533 	amr_done(sc);
534     }
535 
536     /* mark ourselves as in-shutdown */
537     sc->amr_state |= AMR_STATE_SHUTDOWN;
538 
539     /* flush controller */
540     device_printf(sc->amr_dev, "flushing cache...");
541     kprintf("%s\n", amr_flush(sc) ? "failed" : "done");
542 
543     /* delete all our child devices */
544     for(i = 0 ; i < AMR_MAXLD; i++) {
545 	if(sc->amr_drive[i].al_disk != 0) {
546 	    if((error = device_delete_child(sc->amr_dev,
547 		sc->amr_drive[i].al_disk)) != 0)
548 		goto shutdown_out;
549 
550 	     sc->amr_drive[i].al_disk = 0;
551 	}
552     }
553 
554 shutdown_out:
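    /*
     * amr_startup() re-queries the controller for logical drives, re-adds
     * the child devices and clears AMR_STATE_SHUTDOWN again.
     */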
555     amr_startup(sc);
556 }
557 
558 /*
559  * Bug-for-bug compatibility with Linux!
560  * Some apps will send commands with inlen and outlen set to 0,
561  * even though they expect data to be transferred to them from the
562  * card.  Linux accidentally allows this by allocating a 4KB
563  * buffer for the transfer anyway, but it then throws it away
564  * without copying it back to the app.
565  *
566  * The amr(4) firmware relies on this feature.  In fact, it assumes
567  * the buffer is always a power of 2 up to a max of 64k.  There is
568  * also at least one case where it assumes a buffer less than 16k is
569  * greater than 16k.  Force a minimum buffer size of 32k and round
570  * sizes between 32k and 64k up to 64k as a workaround.
571  */
572 static unsigned long
573 amr_ioctl_buffer_length(unsigned long len)
574 {
575 
576     if (len <= 32 * 1024)
577 	return (32 * 1024);
578     if (len <= 64 * 1024)
579 	return (64 * 1024);
580     return (len);
581 }
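/*
 * Example mappings (derived from the checks above):
 *   len =   4096  -> 32768   (minimum buffer size)
 *   len =  40000  -> 65536   (between 32k and 64k rounds up to 64k)
 *   len = 131072  -> 131072  (larger requests pass through unchanged)
 */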
582 
583 int
584 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
585     struct sysmsg *sm)
586 {
587     struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
588     struct amr_command		*ac;
589     struct amr_mailbox		*mb;
590     struct amr_linux_ioctl	ali;
591     void			*dp, *temp;
592     int				error;
593     int				adapter, len, ac_flags = 0;
594     int				logical_drives_changed = 0;
595     u_int32_t			linux_version = 0x02100000;
596     u_int8_t			status;
597     struct amr_passthrough	*ap;	/* 60 bytes */
598 
599     error = 0;
600     dp = NULL;
601     ac = NULL;
602     ap = NULL;
603 
604     if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
605 	return (error);
606     switch (ali.ui.fcs.opcode) {
607     case 0x82:
608 	switch(ali.ui.fcs.subopcode) {
609 	case 'e':
610 	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
611 		sizeof(linux_version));
612 	    error = 0;
613 	    break;
614 
615 	case 'm':
616 	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
617 		sizeof(linux_no_adapter));
618 	    sm->sm_result.iresult = linux_no_adapter;
619 	    error = 0;
620 	    break;
621 
622 	default:
623 	    kprintf("Unknown subopcode\n");
624 	    error = ENOIOCTL;
625 	    break;
626 	}
627     break;
628 
629     case 0x80:
630     case 0x81:
631 	if (ali.ui.fcs.opcode == 0x80)
632 	    len = max(ali.outlen, ali.inlen);
633 	else
634 	    len = ali.ui.fcs.length;
635 
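	/*
	 * The Linux megaraid ioctl interface packs the adapter number as
	 * ('m' << 8) | adapno; the XOR below recovers the plain index.
	 */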
636 	adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
637 
638 	mb = (void *)&ali.mbox[0];
639 
640 	if ((ali.mbox[0] == FC_DEL_LOGDRV  && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
641 	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
642 	    if (sc->amr_allow_vol_config == 0) {
643 		error = EPERM;
644 		break;
645 	    }
646 	    logical_drives_changed = 1;
647 	}
648 
649 	if (ali.mbox[0] == AMR_CMD_PASS) {
650 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
651 	    while ((ac = amr_alloccmd(sc)) == NULL)
652 		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
653 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
654 	    ap = &ac->ac_ccb->ccb_pthru;
655 
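	    /*
	     * For the Linux passthrough ioctl the mailbox 'physaddr' field
	     * actually carries a user-space pointer to the caller's
	     * amr_passthrough structure, hence the copyin()/copyout() below.
	     */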
656 	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
657 		sizeof(struct amr_passthrough));
658 	    if (error)
659 		break;
660 
661 	    if (ap->ap_data_transfer_length)
662 		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
663 		    M_WAITOK | M_ZERO);
664 
665 	    if (ali.inlen) {
666 		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
667 		    dp, ap->ap_data_transfer_length);
668 		if (error)
669 		    break;
670 	    }
671 
672 	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
673 	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
674 	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
675 	    ac->ac_flags = ac_flags;
676 
677 	    ac->ac_data = dp;
678 	    ac->ac_length = ap->ap_data_transfer_length;
679 	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
680 
681 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
682 	    error = amr_wait_command(ac);
683 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
684 	    if (error)
685 		break;
686 
687 	    status = ac->ac_status;
688 	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
689 	    if (error)
690 		break;
691 
692 	    if (ali.outlen) {
693 		error = copyout(dp, temp, ap->ap_data_transfer_length);
694 	        if (error)
695 		    break;
696 	    }
697 	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
698 	    if (error)
699 		break;
700 
701 	    error = 0;
702 	    break;
703 	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
704 	    kprintf("No AMR_CMD_PASS_64\n");
705 	    error = ENOIOCTL;
706 	    break;
707 	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
708 	    kprintf("No AMR_CMD_EXTPASS\n");
709 	    error = ENOIOCTL;
710 	    break;
711 	} else {
712 	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
713 
714 	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);
715 
716 	    if (ali.inlen) {
717 		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
718 		if (error)
719 		    break;
720 	    }
721 
722 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
723 	    while ((ac = amr_alloccmd(sc)) == NULL)
724 		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
725 
726 	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
727 	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
728 	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
729 
730 	    ac->ac_length = len;
731 	    ac->ac_data = dp;
732 	    ac->ac_flags = ac_flags;
733 
734 	    error = amr_wait_command(ac);
735 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
736 	    if (error)
737 		break;
738 
739 	    status = ac->ac_status;
740 	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
741 	    if (ali.outlen) {
742 		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
743 		if (error)
744 		    break;
745 	    }
746 
747 	    error = 0;
748 	    if (logical_drives_changed)
749 		amr_rescan_drives(dev);
750 	    break;
751 	}
752 	break;
753 
754     default:
755 	debug(1, "unknown linux ioctl 0x%lx", cmd);
756 	kprintf("unknown linux ioctl 0x%lx\n", cmd);
757 	error = ENOIOCTL;
758 	break;
759     }
760 
761     /*
762      * Clean up: release the command (re-taking the list lock) and free
763      * any data buffer that was allocated above.
764      */
765     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
766     if (ac != NULL)
767 	amr_releasecmd(ac);
768     lockmgr(&sc->amr_list_lock, LK_RELEASE);
769     if (dp != NULL)
770 	kfree(dp, M_AMR);
771     return(error);
772 }
773 
774 static int
775 amr_ioctl(struct dev_ioctl_args *ap)
776 {
777     cdev_t			dev = ap->a_head.a_dev;
778     caddr_t			addr = ap->a_data;
779     u_long			cmd = ap->a_cmd;
780     struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
781     union {
782 	void			*_p;
783 	struct amr_user_ioctl	*au;
784 #ifdef AMR_IO_COMMAND32
785 	struct amr_user_ioctl32	*au32;
786 #endif
787 	int			*result;
788     } arg;
789     struct amr_command		*ac;
790     struct amr_mailbox_ioctl	*mbi;
791     void			*dp, *au_buffer;
792     unsigned long		au_length, real_length;
793     unsigned char		*au_cmd;
794     int				*au_statusp, au_direction;
795     int				error;
796     struct amr_passthrough	*_ap;	/* 60 bytes */
797     int				logical_drives_changed = 0;
798 
799     debug_called(1);
800 
801     arg._p = (void *)addr;
802 
803     error = 0;
804     dp = NULL;
805     ac = NULL;
806     _ap = NULL;
807 
808     switch(cmd) {
809 
810     case AMR_IO_VERSION:
811 	debug(1, "AMR_IO_VERSION");
812 	*arg.result = AMR_IO_VERSION_NUMBER;
813 	return(0);
814 
815 #ifdef AMR_IO_COMMAND32
816     /*
817      * Accept ioctls from 32-bit binaries on 64-bit
818      * platforms such as amd64.  LSI's MEGAMGR utility is
819      * the only example known today...	-mi
820      */
821     case AMR_IO_COMMAND32:
822 	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
823 	au_cmd = arg.au32->au_cmd;
824 	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
825 	au_length = arg.au32->au_length;
826 	au_direction = arg.au32->au_direction;
827 	au_statusp = &arg.au32->au_status;
828 	break;
829 #endif
830 
831     case AMR_IO_COMMAND:
832 	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
833 	au_cmd = arg.au->au_cmd;
834 	au_buffer = (void *)arg.au->au_buffer;
835 	au_length = arg.au->au_length;
836 	au_direction = arg.au->au_direction;
837 	au_statusp = &arg.au->au_status;
838 	break;
839 
840     case 0xc0046d00:
841     case 0xc06e6d00:	/* Linux emulation */
842 	{
843 	    devclass_t			devclass;
844 	    struct amr_linux_ioctl	ali;
845 	    int				adapter, error;
846 
847 	    devclass = devclass_find("amr");
848 	    if (devclass == NULL)
849 		return (ENOENT);
850 
851 	    error = copyin(addr, &ali, sizeof(ali));
852 	    if (error)
853 		return (error);
854 	    if (ali.ui.fcs.opcode == 0x82)
855 		adapter = 0;
856 	    else
857 		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
858 
859 	    sc = devclass_get_softc(devclass, adapter);
860 	    if (sc == NULL)
861 		return (ENOENT);
862 
863 	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
864 	}
865     default:
866 	debug(1, "unknown ioctl 0x%lx", cmd);
867 	return(ENOIOCTL);
868     }
869 
870     if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
871 	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
872 	if (sc->amr_allow_vol_config == 0) {
873 	    error = EPERM;
874 	    goto out;
875 	}
876 	logical_drives_changed = 1;
877 #ifdef LSI
878 	if ((error = amr_prepare_ld_delete(sc)) != 0)
879 	    return (error);
880 #endif
881     }
882 
883     /* handle inbound data buffer */
884     real_length = amr_ioctl_buffer_length(au_length);
885     if (au_length != 0 && au_cmd[0] != 0x06) {
886 	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
887 	    error = ENOMEM;
888 	    goto out;
889 	}
890 	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
891 	    kfree(dp, M_AMR);
892 	    return (error);
893 	}
894 	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
895     }
896 
897     /* Get a command buffer, sleeping until one becomes available. */
898 
899     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
900     while ((ac = amr_alloccmd(sc)) == NULL)
901 	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
902 
903     /* handle SCSI passthrough command */
904     if (au_cmd[0] == AMR_CMD_PASS) {
905         int len;
906 
907 	_ap = &ac->ac_ccb->ccb_pthru;
908 	bzero(_ap, sizeof(struct amr_passthrough));
909 
910 	/* copy cdb */
911         len = au_cmd[2];
912 	_ap->ap_cdb_length = len;
913 	bcopy(au_cmd + 3, _ap->ap_cdb, len);
914 
915 	/* build passthrough */
916 	_ap->ap_timeout		= au_cmd[len + 3] & 0x07;
917 	_ap->ap_ars		= (au_cmd[len + 3] & 0x08) ? 1 : 0;
918 	_ap->ap_islogical	= (au_cmd[len + 3] & 0x80) ? 1 : 0;
919 	_ap->ap_logical_drive_no = au_cmd[len + 4];
920 	_ap->ap_channel		= au_cmd[len + 5];
921 	_ap->ap_scsi_id 	= au_cmd[len + 6];
922 	_ap->ap_request_sense_length	= 14;
923 	_ap->ap_data_transfer_length	= au_length;
924 	/* XXX what about the request-sense area? does the caller want it? */
925 
926 	/* build command */
927 	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
928 	ac->ac_flags = AMR_CMD_CCB;
929 
930     } else {
931 	/* direct command to controller */
932 	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
933 
934 	/* copy pertinent mailbox items */
935 	mbi->mb_command = au_cmd[0];
936 	mbi->mb_channel = au_cmd[1];
937 	mbi->mb_param = au_cmd[2];
938 	mbi->mb_pad[0] = au_cmd[3];
939 	mbi->mb_drive = au_cmd[4];
940 	ac->ac_flags = 0;
941     }
942 
943     /* build the command */
944     ac->ac_data = dp;
945     ac->ac_length = real_length;
946     ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
947 
948     /* run the command */
949     error = amr_wait_command(ac);
950     lockmgr(&sc->amr_list_lock, LK_RELEASE);
951     if (error)
952 	goto out;
953 
954     /* copy out data and set status */
955     if (au_length != 0) {
956 	error = copyout(dp, au_buffer, au_length);
957     }
958     debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
959     if (dp != NULL)
960 	debug(2, "%p status 0x%x", dp, ac->ac_status);
961     *au_statusp = ac->ac_status;
962 
963 out:
964     /*
965      * Clean up: release the command (re-taking the list lock) and free
966      * any data buffer that was allocated above.
967      */
968     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
969     if (ac != NULL)
970 	amr_releasecmd(ac);
971     lockmgr(&sc->amr_list_lock, LK_RELEASE);
972     if (dp != NULL)
973 	kfree(dp, M_AMR);
974 
975 #ifndef LSI
976     if (logical_drives_changed)
977 	amr_rescan_drives(dev);
978 #endif
979 
980     return(error);
981 }
982 
983 #if 0
984 /********************************************************************************
985  ********************************************************************************
986                                                                 Status Monitoring
987  ********************************************************************************
988  ********************************************************************************/
989 
990 /********************************************************************************
991  * Perform a periodic check of the controller status
992  */
993 static void
994 amr_periodic(void *data)
995 {
996     struct amr_softc	*sc = (struct amr_softc *)data;
997 
998     debug_called(2);
999 
1000     /* XXX perform periodic status checks here */
1001 
1002     /* compensate for missed interrupts */
1003     amr_done(sc);
1004 
1005     /* reschedule */
1006     callout_reset(&sc->amr_timeout, hz, amr_periodic, sc);
1007 }
1008 #endif
1009 
1010 /********************************************************************************
1011  ********************************************************************************
1012                                                                  Command Wrappers
1013  ********************************************************************************
1014  ********************************************************************************/
1015 
1016 /********************************************************************************
1017  * Interrogate the controller for the operational parameters we require.
1018  */
1019 static int
1020 amr_query_controller(struct amr_softc *sc)
1021 {
1022     struct amr_enquiry3	*aex;
1023     struct amr_prodinfo	*ap;
1024     struct amr_enquiry	*ae;
1025     int			ldrv;
1026     int			status;
1027 
1028     /*
1029      * Greater than 10 byte cdb support
1030      */
1031     sc->support_ext_cdb = amr_support_ext_cdb(sc);
1032 
1033     if(sc->support_ext_cdb) {
1034 	debug(2,"supports extended CDBs.");
1035     }
1036 
1037     /*
1038      * Try to issue an ENQUIRY3 command
1039      */
1040     if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
1041 			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
1042 
1043 	/*
1044 	 * Fetch current state of logical drives.
1045 	 */
1046 	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
1047 	    sc->amr_drive[ldrv].al_size       = aex->ae_drivesize[ldrv];
1048 	    sc->amr_drive[ldrv].al_state      = aex->ae_drivestate[ldrv];
1049 	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
1050 	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1051 		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1052 	}
1053 	kfree(aex, M_AMR);
1054 
1055 	/*
1056 	 * Get product info for channel count.
1057 	 */
1058 	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1059 	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
1060 	    return(1);
1061 	}
1062 	sc->amr_maxdrives = 40;
1063 	sc->amr_maxchan = ap->ap_nschan;
1064 	sc->amr_maxio = ap->ap_maxio;
1065 	sc->amr_type |= AMR_TYPE_40LD;
1066 	kfree(ap, M_AMR);
1067 
1068 	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1069 	if (ap != NULL)
1070 	    kfree(ap, M_AMR);
1071 	if (!status) {
1072 	    sc->amr_ld_del_supported = 1;
1073 	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1074 	}
1075     } else {
1076 
1077 	/* failed, try the 8LD ENQUIRY commands */
1078 	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1079 	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1080 		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1081 		return(1);
1082 	    }
1083 	    ae->ae_signature = 0;
1084 	}
1085 
1086 	/*
1087 	 * Fetch current state of logical drives.
1088 	 */
1089 	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1090 	    sc->amr_drive[ldrv].al_size       = ae->ae_ldrv.al_size[ldrv];
1091 	    sc->amr_drive[ldrv].al_state      = ae->ae_ldrv.al_state[ldrv];
1092 	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1093 	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1094 		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1095 	}
1096 
1097 	sc->amr_maxdrives = 8;
1098 	sc->amr_maxchan = ae->ae_adapter.aa_channels;
1099 	sc->amr_maxio = ae->ae_adapter.aa_maxio;
1100 	kfree(ae, M_AMR);
1101     }
1102 
1103     /*
1104      * Mark remaining drives as unused.
1105      */
1106     for (; ldrv < AMR_MAXLD; ldrv++)
1107 	sc->amr_drive[ldrv].al_size = 0xffffffff;
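    /*
     * 0xffffffff is the sentinel amr_startup() checks to stop its drive scan.
     */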
1108 
1109     /*
1110      * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
1111      * the controller's reported value, and lockups have been seen when we do.
1112      */
1113     sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1114 
1115     return(0);
1116 }
1117 
1118 /********************************************************************************
1119  * Run a generic enquiry-style command.
1120  */
1121 static void *
1122 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1123 {
1124     struct amr_command	*ac;
1125     void		*result;
1126     u_int8_t		*mbox;
1127     int			error;
1128 
1129     debug_called(1);
1130 
1131     error = 1;
1132     result = NULL;
1133 
1134     /* get ourselves a command buffer */
1135     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1136     ac = amr_alloccmd(sc);
1137     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1138     if (ac == NULL)
1139 	goto out;
1140     /* allocate the response structure */
1141     if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1142 	goto out;
1143     /* set command flags */
1144 
1145     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1146 
1147     /* point the command at our data */
1148     ac->ac_data = result;
1149     ac->ac_length = bufsize;
1150 
1151     /* build the command proper */
1152     mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1153     mbox[0] = cmd;
1154     mbox[2] = cmdsub;
1155     mbox[3] = cmdqual;
1156     *status = 0;
1157 
1158     /* can't assume that interrupts are going to work here, so play it safe */
1159     if (sc->amr_poll_command(ac))
1160 	goto out;
1161     error = ac->ac_status;
1162     *status = ac->ac_status;
1163 
1164  out:
1165     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1166     if (ac != NULL)
1167 	amr_releasecmd(ac);
1168     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1169     if ((error != 0) && (result != NULL)) {
1170 	kfree(result, M_AMR);
1171 	result = NULL;
1172     }
1173     return(result);
1174 }
1175 
1176 /********************************************************************************
1177  * Flush the controller's internal cache, return status.
1178  */
1179 int
1180 amr_flush(struct amr_softc *sc)
1181 {
1182     struct amr_command	*ac;
1183     int			error;
1184 
1185     /* get ourselves a command buffer */
1186     error = 1;
1187     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1188     ac = amr_alloccmd(sc);
1189     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1190     if (ac == NULL)
1191 	goto out;
1192     /* set command flags */
1193     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1194 
1195     /* build the command proper */
1196     ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1197 
1198     /* we have to poll, as the system may be going down or otherwise damaged */
1199     if (sc->amr_poll_command(ac))
1200 	goto out;
1201     error = ac->ac_status;
1202 
1203  out:
1204     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1205     if (ac != NULL)
1206 	amr_releasecmd(ac);
1207     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1208     return(error);
1209 }
1210 
1211 /********************************************************************************
1212  * Detect extended CDB support (CDBs greater than 10 bytes).
1213  * Returns 1 if the controller supports extended CDBs,
1214  * 0 if it does not.
1215  */
1216 static int
1217 amr_support_ext_cdb(struct amr_softc *sc)
1218 {
1219     struct amr_command	*ac;
1220     u_int8_t		*mbox;
1221     int			error;
1222 
1223     /* get ourselves a command buffer */
1224     error = 0;
1225     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1226     ac = amr_alloccmd(sc);
1227     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1228     if (ac == NULL)
1229 	goto out;
1230     /* set command flags */
1231     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1232 
1233     /* build the command proper */
1234     mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1235     mbox[0] = 0xA4;
1236     mbox[2] = 0x16;
1237 
1238 
1239     /* we have to poll, as the system may be going down or otherwise damaged */
1240     if (sc->amr_poll_command(ac))
1241 	goto out;
1242     if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1243 	    error = 1;
1244     }
1245 
1246 out:
1247     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1248     if (ac != NULL)
1249 	amr_releasecmd(ac);
1250     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1251     return(error);
1252 }
1253 
1254 /********************************************************************************
1255  * Try to find I/O work for the controller from one or more of the work queues.
1256  *
1257  * We make the assumption that if the controller is not ready to take a command
1258  * at some given time, it will generate an interrupt at some later time when
1259  * it is.
1260  */
1261 void
1262 amr_startio(struct amr_softc *sc)
1263 {
1264     struct amr_command	*ac;
1265 
1266     /* spin until something prevents us from doing any work */
1267     for (;;) {
1268 
1269 	/* Don't bother to queue commands if no bounce buffers are available. */
1270 	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1271 	    break;
1272 
1273 	/* try to get a ready command */
1274 	ac = amr_dequeue_ready(sc);
1275 
1276 	/* if that failed, build a command from a bio */
1277 	if (ac == NULL)
1278 	    (void)amr_bio_command(sc, &ac);
1279 
1280 	/* if that failed, build a command from a ccb */
1281 	if ((ac == NULL) && (sc->amr_cam_command != NULL))
1282 	    sc->amr_cam_command(sc, &ac);
1283 
1284 	/* if we don't have anything to do, give up */
1285 	if (ac == NULL)
1286 	    break;
1287 
1288 	/* try to give the command to the controller; if this fails save it for later and give up */
1289 	if (amr_start(ac)) {
1290 	    debug(2, "controller busy, command deferred");
1291 	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
1292 	    break;
1293 	}
1294     }
1295 }
1296 
1297 /********************************************************************************
1298  * Handle completion of an I/O command.
1299  */
1300 static void
1301 amr_completeio(struct amr_command *ac)
1302 {
1303     struct amr_softc		*sc = ac->ac_sc;
1304     static struct timeval	lastfail;
1305     static int			curfail;
1306     struct buf			*bp = ac->ac_bio->bio_buf;
1307 
1308     if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
1309 	bp->b_error = EIO;
1310 	bp->b_flags |= B_ERROR;
1311 
1312 	if (ppsratecheck(&lastfail, &curfail, 1))
1313 	    device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
1314 /*	amr_printcommand(ac);*/
1315     }
1316     amrd_intr(ac->ac_bio);
1317     lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
1318     amr_releasecmd(ac);
1319     lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
1320 }
1321 
1322 /********************************************************************************
1323  ********************************************************************************
1324                                                                Command Processing
1325  ********************************************************************************
1326  ********************************************************************************/
1327 
1328 /********************************************************************************
1329  * Convert a bio off the top of the bio queue into a command.
1330  */
1331 static int
1332 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1333 {
1334     struct amr_command	*ac;
1335     struct amrd_softc	*amrd;
1336     struct bio		*bio;
1337     struct buf		*bp;
1338     int			error;
1339     int			blkcount;
1340     int			driveno;
1341     int			cmd;
1342 
1343     ac = NULL;
1344     error = 0;
1345 
1346     /* get a command */
1347     if ((ac = amr_alloccmd(sc)) == NULL)
1348 	return (ENOMEM);
1349 
1350     /* get a bio to work on */
1351     if ((bio = amr_dequeue_bio(sc)) == NULL) {
1352 	amr_releasecmd(ac);
1353 	return (0);
1354     }
1355 
1356     /* connect the bio to the command */
1357     bp = bio->bio_buf;
1358     ac->ac_complete = amr_completeio;
1359     ac->ac_bio = bio;
1360     ac->ac_data = bp->b_data;
1361     ac->ac_length = bp->b_bcount;
1362     cmd = 0;
1363     switch (bp->b_cmd) {
1364     case BUF_CMD_READ:
1365 	ac->ac_flags |= AMR_CMD_DATAIN;
1366 	if (AMR_IS_SG64(sc)) {
1367 	    cmd = AMR_CMD_LREAD64;
1368 	    ac->ac_flags |= AMR_CMD_SG64;
1369 	} else
1370 	    cmd = AMR_CMD_LREAD;
1371 	break;
1372     case BUF_CMD_WRITE:
1373 	ac->ac_flags |= AMR_CMD_DATAOUT;
1374 	if (AMR_IS_SG64(sc)) {
1375 	    cmd = AMR_CMD_LWRITE64;
1376 	    ac->ac_flags |= AMR_CMD_SG64;
1377 	} else
1378 	    cmd = AMR_CMD_LWRITE;
1379 	break;
1380     case BUF_CMD_FLUSH:
1381 	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1382 	cmd = AMR_CMD_FLUSH;
1383 	break;
1384     default:
1385 	panic("Invalid bio command");
1386     }
1387     amrd = (struct amrd_softc *)bio->bio_driver_info;
1388     driveno = amrd->amrd_drive - sc->amr_drive;
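    /* round the byte count up to whole AMR_BLKSIZE blocks */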
1389     blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1390 
1391     ac->ac_mailbox.mb_command = cmd;
1392     if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
1393 	ac->ac_mailbox.mb_blkcount = blkcount;
1394 	ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
1395 	if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
1396 	    device_printf(sc->amr_dev,
1397 			  "I/O beyond end of unit (%lld,%d > %lu)\n",
1398 			  (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
1399 			  (u_long)sc->amr_drive[driveno].al_size);
1400 	}
1401     }
1402     ac->ac_mailbox.mb_drive = driveno;
1403     if (sc->amr_state & AMR_STATE_REMAP_LD)
1404 	ac->ac_mailbox.mb_drive |= 0x80;
1405 
1406     /* we fill in the s/g related data when the command is mapped */
1407 
1408 
1409     *acp = ac;
1410     return(error);
1411 }
1412 
1413 /********************************************************************************
1414  * Take a command, submit it to the controller and sleep until it completes
1415  * or fails.  Interrupts must be enabled, returns nonzero on error.
1416  */
1417 static int
1418 amr_wait_command(struct amr_command *ac)
1419 {
1420     int			error = 0;
1421     struct amr_softc	*sc = ac->ac_sc;
1422 
1423     debug_called(1);
1424 
1425     ac->ac_complete = NULL;
1426     ac->ac_flags |= AMR_CMD_SLEEP;
1427     if ((error = amr_start(ac)) != 0) {
1428 	return(error);
1429     }
1430 
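    /*
     * Sleep until the completion path clears AMR_CMD_BUSY and wakes us up.
     * lksleep() atomically drops and re-acquires amr_list_lock, so the
     * caller is expected to hold it across this call.
     */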
1431     while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1432 	error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0);
1433     }
1434 
1435     return(error);
1436 }
1437 
1438 /********************************************************************************
1439  * Take a command, submit it to the controller and busy-wait for it to return.
1440  * Returns nonzero on error.  Can be safely called with interrupts enabled.
1441  */
1442 static int
1443 amr_std_poll_command(struct amr_command *ac)
1444 {
1445     struct amr_softc	*sc = ac->ac_sc;
1446     int			error, count;
1447 
1448     debug_called(2);
1449 
1450     ac->ac_complete = NULL;
1451     if ((error = amr_start(ac)) != 0)
1452 	return(error);
1453 
1454     count = 0;
1455     do {
1456 	/*
1457 	 * Poll for completion, although the interrupt handler may beat us to it.
1458 	 * Note that the timeout here is somewhat arbitrary.
1459 	 */
1460 	amr_done(sc);
1461 	DELAY(1000);
1462     } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
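    /* roughly one second of polling: 1000 iterations of DELAY(1000) */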
1463     if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1464 	error = 0;
1465     } else {
1466 	/* XXX the slot is now marked permanently busy */
1467 	error = EIO;
1468 	device_printf(sc->amr_dev, "polled command timeout\n");
1469     }
1470     return(error);
1471 }
1472 
1473 static void
1474 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1475 {
1476     struct amr_command *ac = arg;
1477     struct amr_softc *sc = ac->ac_sc;
1478     int mb_channel;
1479 
1480     if (err) {
1481 	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1482 	ac->ac_status = AMR_STATUS_ABORTED;
1483 	return;
1484     }
1485 
1486     amr_setup_sg(arg, segs, nsegs, err);
1487 
1488     /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1489     mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1490     if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1491         ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1492         (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1493 	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1494 
1495     ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1496     ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1497     if (AC_IS_SG64(ac)) {
1498 	ac->ac_sg64_hi = 0;
1499 	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1500     }
1501 
1502     sc->amr_poll_command1(sc, ac);
1503 }
1504 
1505 /********************************************************************************
1506  * Take a command, submit it to the controller and busy-wait for it to return.
1507  * Returns nonzero on error.  Can be safely called with interrupts enabled.
1508  */
1509 static int
1510 amr_quartz_poll_command(struct amr_command *ac)
1511 {
1512     struct amr_softc	*sc = ac->ac_sc;
1513     int			error;
1514 
1515     debug_called(2);
1516 
1517     error = 0;
1518 
1519     if (AC_IS_SG64(ac)) {
1520 	ac->ac_tag = sc->amr_buffer64_dmat;
1521 	ac->ac_datamap = ac->ac_dma64map;
1522     } else {
1523 	ac->ac_tag = sc->amr_buffer_dmat;
1524 	ac->ac_datamap = ac->ac_dmamap;
1525     }
1526 
1527     /* now we have a slot, we can map the command (unmapped in amr_complete) */
1528     if (ac->ac_data != NULL && ac->ac_length != 0) {
1529 	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1530 	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1531 	    error = 1;
1532 	}
1533     } else {
1534 	error = amr_quartz_poll_command1(sc, ac);
1535     }
1536 
1537     return (error);
1538 }
1539 
1540 static int
1541 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1542 {
1543     int count, error;
1544 
1545     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
1546     if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1547 	count=0;
1548 	while (sc->amr_busyslots) {
1549 	    lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
1550 	    if(count++>10) {
1551 		break;
1552 	    }
1553 	}
1554 
1555 	if(sc->amr_busyslots) {
1556 	    device_printf(sc->amr_dev, "adapter is busy\n");
1557 	    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1558 	    if (ac->ac_data != NULL) {
1559 		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1560 	    }
1561 	    ac->ac_status=0;
1562 	    return(1);
1563 	}
1564     }
1565 
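    /*
     * Polled mailbox handshake: copy the command into the shared mailbox,
     * mark it busy and ring the doorbell with AMR_QIDB_SUBMIT, then spin
     * until the firmware fills in nstatus/status.  Completion is
     * acknowledged by writing 0x77 to mb_ack and ringing AMR_QIDB_ACK.
     */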
1566     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1567 
1568     /* clear the poll/ack fields in the mailbox */
1569     sc->amr_mailbox->mb_ident = 0xFE;
1570     sc->amr_mailbox->mb_nstatus = 0xFF;
1571     sc->amr_mailbox->mb_status = 0xFF;
1572     sc->amr_mailbox->mb_poll = 0;
1573     sc->amr_mailbox->mb_ack = 0;
1574     sc->amr_mailbox->mb_busy = 1;
1575 
1576     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1577 
1578     while(sc->amr_mailbox->mb_nstatus == 0xFF)
1579 	DELAY(1);
1580     while(sc->amr_mailbox->mb_status == 0xFF)
1581 	DELAY(1);
1582     ac->ac_status=sc->amr_mailbox->mb_status;
1583     error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1584     while(sc->amr_mailbox->mb_poll != 0x77)
1585 	DELAY(1);
1586     sc->amr_mailbox->mb_poll = 0;
1587     sc->amr_mailbox->mb_ack = 0x77;
1588 
1589     /* acknowledge that we have the commands */
1590     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1591     while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1592 	DELAY(1);
1593     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1594 
1595     /* unmap the command's data buffer */
1596     if (ac->ac_flags & AMR_CMD_DATAIN) {
1597 	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1598     }
1599     if (ac->ac_flags & AMR_CMD_DATAOUT) {
1600 	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1601     }
1602     bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1603 
1604     return(error);
1605 }
1606 
1607 static __inline int
1608 amr_freeslot(struct amr_command *ac)
1609 {
1610     struct amr_softc *sc = ac->ac_sc;
1611     int			slot;
1612 
1613     debug_called(3);
1614 
1615     slot = ac->ac_slot;
1616     if (sc->amr_busycmd[slot] == NULL)
1617 	panic("amr: slot %d not busy?", slot);
1618 
1619     sc->amr_busycmd[slot] = NULL;
1620     atomic_subtract_int(&sc->amr_busyslots, 1);
1621 
1622     return (0);
1623 }
1624 
1625 /********************************************************************************
1626  * Map/unmap (ac)'s data in the controller's addressable space as required.
1627  *
1628  * These functions may be safely called multiple times on a given command.
1629  */
1630 static void
1631 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1632 {
1633     struct amr_command	*ac = (struct amr_command *)arg;
1634     struct amr_sgentry	*sg;
1635     struct amr_sg64entry *sg64;
1636     int flags, i;
1637 
1638     debug_called(3);
1639 
1640     /* get base address of s/g table */
1641     sg = ac->ac_sg.sg32;
1642     sg64 = ac->ac_sg.sg64;
1643 
1644     if (AC_IS_SG64(ac)) {
1645 	ac->ac_nsegments = nsegments;
1646 	ac->ac_mb_physaddr = 0xffffffff;
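	/*
	 * With 64-bit S/G lists the mailbox physaddr is left at 0xffffffff;
	 * the real table address is passed via ac_sg64_lo/ac_sg64_hi when
	 * the command is submitted.
	 */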
1647 	for (i = 0; i < nsegments; i++, sg64++) {
1648 	    sg64->sg_addr = segs[i].ds_addr;
1649 	    sg64->sg_count = segs[i].ds_len;
1650 	}
1651     } else {
1652 	/* decide whether we need to populate the s/g table */
1653 	if (nsegments < 2) {
1654 	    ac->ac_nsegments = 0;
1655 	    ac->ac_mb_physaddr = segs[0].ds_addr;
1656 	} else {
1657             ac->ac_nsegments = nsegments;
1658 	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1659 	    for (i = 0; i < nsegments; i++, sg++) {
1660 		sg->sg_addr = segs[i].ds_addr;
1661 		sg->sg_count = segs[i].ds_len;
1662 	    }
1663 	}
1664     }
1665 
1666     flags = 0;
1667     if (ac->ac_flags & AMR_CMD_DATAIN)
1668 	flags |= BUS_DMASYNC_PREREAD;
1669     if (ac->ac_flags & AMR_CMD_DATAOUT)
1670 	flags |= BUS_DMASYNC_PREWRITE;
1671     bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1672     ac->ac_flags |= AMR_CMD_MAPPED;
1673 }
1674 
1675 static void
1676 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1677 {
1678     struct amr_command *ac = arg;
1679     struct amr_softc *sc = ac->ac_sc;
1680     int mb_channel;
1681 
1682     if (err) {
1683 	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1684 	amr_abort_load(ac);
1685 	return;
1686     }
1687 
1688     amr_setup_sg(arg, segs, nsegs, err);
1689 
1690     /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1691     mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1692     if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1693         ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1694         (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1695 	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1696 
1697     ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1698     ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1699     if (AC_IS_SG64(ac)) {
1700 	ac->ac_sg64_hi = 0;
1701 	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1702     }
1703 
1704     if (sc->amr_submit_command(ac) == EBUSY) {
1705 	amr_freeslot(ac);
1706 	amr_requeue_ready(ac);
1707     }
1708 }
1709 
1710 static void
1711 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1712 {
1713     struct amr_command *ac = arg;
1714     struct amr_softc *sc = ac->ac_sc;
1715     struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1716     struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1717 
1718     if (err) {
1719 	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1720 	amr_abort_load(ac);
1721 	return;
1722     }
1723 
1724     /* Set up the mailbox portion of the command to point at the ccb */
1725     ac->ac_mailbox.mb_nsgelem = 0;
1726     ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1727 
1728     amr_setup_sg(arg, segs, nsegs, err);
1729 
1730     switch (ac->ac_mailbox.mb_command) {
1731     case AMR_CMD_EXTPASS:
1732 	aep->ap_no_sg_elements = ac->ac_nsegments;
1733 	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1734 	break;
1735     case AMR_CMD_PASS:
1736 	ap->ap_no_sg_elements = ac->ac_nsegments;
1737 	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1738 	break;
1739     default:
1740 	panic("Unknown ccb command");
1741     }
1742 
1743     if (sc->amr_submit_command(ac) == EBUSY) {
1744 	amr_freeslot(ac);
1745 	amr_requeue_ready(ac);
1746     }
1747 }
1748 
1749 static int
1750 amr_mapcmd(struct amr_command *ac)
1751 {
1752     bus_dmamap_callback_t *cb;
1753     struct amr_softc	*sc = ac->ac_sc;
1754 
1755     debug_called(3);
1756 
1757     if (AC_IS_SG64(ac)) {
1758 	ac->ac_tag = sc->amr_buffer64_dmat;
1759 	ac->ac_datamap = ac->ac_dma64map;
1760     } else {
1761 	ac->ac_tag = sc->amr_buffer_dmat;
1762 	ac->ac_datamap = ac->ac_dmamap;
1763     }
1764 
1765     if (ac->ac_flags & AMR_CMD_CCB)
1766 	cb = amr_setup_ccb;
1767     else
1768 	cb = amr_setup_data;
1769 
1770     /* if the command involves data at all, and hasn't been mapped */
1771     if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1772 	/* map the data buffers into bus space and build the s/g list */
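	/*
	 * bus_dmamap_load() may defer the mapping and return EINPROGRESS; the
	 * callback will submit the command once the mapping is complete, so
	 * freeze the ready queue until then.
	 */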
1773 	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1774 	     ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1775 	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1776 	}
1777     } else {
1778 	if (sc->amr_submit_command(ac) == EBUSY) {
1779 	    amr_freeslot(ac);
1780 	    amr_requeue_ready(ac);
1781 	}
1782     }
1783 
1784     return (0);
1785 }
1786 
1787 static void
1788 amr_unmapcmd(struct amr_command *ac)
1789 {
1790     int			flag;
1791 
1792     debug_called(3);
1793 
1794     /* if the command involved data at all and was mapped */
1795     if (ac->ac_flags & AMR_CMD_MAPPED) {
1796 
1797 	if (ac->ac_data != NULL) {
1798 
1799 	    flag = 0;
1800 	    if (ac->ac_flags & AMR_CMD_DATAIN)
1801 		flag |= BUS_DMASYNC_POSTREAD;
1802 	    if (ac->ac_flags & AMR_CMD_DATAOUT)
1803 		flag |= BUS_DMASYNC_POSTWRITE;
1804 
1805 	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1806 	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1807 	}
1808 
1809 	ac->ac_flags &= ~AMR_CMD_MAPPED;
1810     }
1811 }
1812 
1813 static void
1814 amr_abort_load(struct amr_command *ac)
1815 {
1816     ac_qhead_t head;
1817     struct amr_softc *sc = ac->ac_sc;
1818 
1819     KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
1820 
1821     ac->ac_status = AMR_STATUS_ABORTED;
1822     amr_init_qhead(&head);
1823     amr_enqueue_completed(ac, &head);
1824 
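    /*
     * amr_complete() acquires amr_list_lock itself, so drop it here and
     * reacquire it for our caller.
     */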
1825     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1826     amr_complete(sc, &head);
1827     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1828 }
1829 
1830 /********************************************************************************
1831  * Take a command and give it to the controller; returns 0 if successful, or
1832  * EBUSY if the command should be retried later.
1833  */
1834 static int
1835 amr_start(struct amr_command *ac)
1836 {
1837     struct amr_softc *sc;
1838     int error = 0;
1839     int slot;
1840 
1841     debug_called(3);
1842 
1843     /* mark command as busy so that polling consumer can tell */
1844     sc = ac->ac_sc;
1845     ac->ac_flags |= AMR_CMD_BUSY;
1846 
1847     /* get a command slot (freed in amr_done) */
1848     slot = ac->ac_slot;
1849     if (sc->amr_busycmd[slot] != NULL)
1850 	panic("amr: slot %d busy?", slot);
1851     sc->amr_busycmd[slot] = ac;
1852     atomic_add_int(&sc->amr_busyslots, 1);
1853 
1854     /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1855     if ((error = amr_mapcmd(ac)) == ENOMEM) {
1856 	/*
1857 	 * Memory resources are short, so free the slot and let this be tried
1858 	 * later.
1859 	 */
1860 	amr_freeslot(ac);
1861     }
1862 
1863     return (error);
1864 }
1865 
1866 /********************************************************************************
1867  * Extract one or more completed commands from the controller (sc)
1868  *
1869  * Returns nonzero if any commands on the work queue were marked as completed.
1870  */
1871 
1872 int
1873 amr_done(struct amr_softc *sc)
1874 {
1875     ac_qhead_t		head;
1876     struct amr_command	*ac;
1877     struct amr_mailbox	mbox;
1878     int			i, idx, result;
1879 
1880     debug_called(3);
1881 
1882     /* See if there's anything for us to do */
1883     result = 0;
1884     amr_init_qhead(&head);
1885 
1886     /* loop collecting completed commands */
1887     for (;;) {
1888 	/* poll for a completed command's identifier and status */
1889 	if (sc->amr_get_work(sc, &mbox)) {
1890 	    result = 1;
1891 
1892 	    /* iterate over completed commands in this result */
1893 	    for (i = 0; i < mbox.mb_nstatus; i++) {
1894 		/* get pointer to busy command */
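		/* idents are slot numbers offset by one, since ident 0 is reserved */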
1895 		idx = mbox.mb_completed[i] - 1;
1896 		ac = sc->amr_busycmd[idx];
1897 
1898 		/* really a busy command? */
1899 		if (ac != NULL) {
1900 
1901 		    /* pull the command from the busy index */
1902 		    amr_freeslot(ac);
1903 
1904 		    /* save status for later use */
1905 		    ac->ac_status = mbox.mb_status;
1906 		    amr_enqueue_completed(ac, &head);
1907 		    debug(3, "completed command with status %x", mbox.mb_status);
1908 		} else {
1909 		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1910 		}
1911 	    }
1912 	} else
1913 	    break;	/* no work */
1914     }
1915 
1916     /* handle completion and timeouts */
1917     amr_complete(sc, &head);
1918 
1919     return(result);
1920 }
1921 
1922 /********************************************************************************
1923  * Do completion processing on done commands on (sc)
1924  */
1925 
1926 static void
1927 amr_complete(void *context, ac_qhead_t *head)
1928 {
1929     struct amr_softc	*sc = (struct amr_softc *)context;
1930     struct amr_command	*ac;
1931 
1932     debug_called(3);
1933 
1934     /* pull completed commands off the queue */
1935     for (;;) {
1936 	ac = amr_dequeue_completed(sc, head);
1937 	if (ac == NULL)
1938 	    break;
1939 
1940 	/* unmap the command's data buffer */
1941 	amr_unmapcmd(ac);
1942 
1943 	/*
1944 	 * Is there a completion handler?
1945 	 */
1946 	if (ac->ac_complete != NULL) {
1947 	    /* unbusy the command */
1948 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1949 	    ac->ac_complete(ac);
1950 
1951 	    /*
1952 	     * Is someone sleeping on this one?
1953 	     */
1954 	} else {
1955 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1956 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1957 	    if (ac->ac_flags & AMR_CMD_SLEEP) {
1958 		/* wake up the sleeper */
1959 		wakeup(ac);
1960 	    }
1961 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
1962 	}
1963 
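	/* if that was the last busy slot, wake anyone sleeping on the softc waiting for the controller to drain */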
1964 	if (!sc->amr_busyslots) {
1965 	    wakeup(sc);
1966 	}
1967     }
1968 
1969     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1970     sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1971     amr_startio(sc);
1972     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1973 }
1974 
1975 /********************************************************************************
1976  ********************************************************************************
1977                                                         Command Buffer Management
1978  ********************************************************************************
1979  ********************************************************************************/
1980 
1981 /********************************************************************************
1982  * Get a new command buffer.
1983  *
1984  * This may return NULL in low-memory cases.
1985  *
1986  * If possible, we recycle a command buffer that's been used before.
1987  */
1988 struct amr_command *
1989 amr_alloccmd(struct amr_softc *sc)
1990 {
1991     struct amr_command	*ac;
1992 
1993     debug_called(3);
1994 
1995     ac = amr_dequeue_free(sc);
1996     if (ac == NULL) {
1997 	sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1998 	return(NULL);
1999     }
2000 
2001     /* clear out significant fields */
2002     ac->ac_status = 0;
2003     bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
2004     ac->ac_flags = 0;
2005     ac->ac_bio = NULL;
2006     ac->ac_data = NULL;
2007     ac->ac_complete = NULL;
2008     ac->ac_retries = 0;
2009     ac->ac_tag = NULL;
2010     ac->ac_datamap = NULL;
2011     return(ac);
2012 }
2013 
2014 /********************************************************************************
2015  * Release a command buffer for recycling.
2016  */
2017 void
2018 amr_releasecmd(struct amr_command *ac)
2019 {
2020     debug_called(3);
2021 
2022     amr_enqueue_free(ac);
2023 }
2024 
2025 /********************************************************************************
2026  * Allocate a new command cluster and initialise it.
2027  */
2028 static void
2029 amr_alloccmd_cluster(struct amr_softc *sc)
2030 {
2031     struct amr_command_cluster	*acc;
2032     struct amr_command		*ac;
2033     int				i, nextslot;
2034 
2035     /*
2036      * If we haven't found the real limit yet, let us have a couple of
2037      * commands in order to be able to probe.
2038      */
2039     if (sc->amr_maxio == 0)
2040 	sc->amr_maxio = 2;
2041 
2042     if (sc->amr_nextslot > sc->amr_maxio)
2043 	return;
2044     acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
2045     if (acc != NULL) {
2046 	nextslot = sc->amr_nextslot;
2047 	lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
2048 	TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2049 	lockmgr(&sc->amr_list_lock, LK_RELEASE);
2050 	for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2051 	    ac = &acc->acc_command[i];
2052 	    ac->ac_sc = sc;
2053 	    ac->ac_slot = nextslot;
2054 
2055 	    /*
2056 	     * The SG table for each slot is a fixed size and is assumed to
2057 	     * to hold 64-bit s/g objects when the driver is configured to do
2058 	     * hold 64-bit s/g objects when the driver is configured to do
2059 	     * cast down to 32-bit objects.
2060 	     */
2061 	    if (AMR_IS_SG64(sc)) {
2062 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2063 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2064 	        ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2065 	    } else {
2066 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2067 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2068 	        ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2069 	    }
2070 
2071 	    ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
2072 	    ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
2073 		(ac->ac_slot * sizeof(union amr_ccb));
2074 
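	    /*
	     * If DMA map creation fails we stop here; this command and the
	     * rest of the cluster are simply never placed on the free list.
	     */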
2075 	    if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
2076 		break;
2077 	    if (AMR_IS_SG64(sc) &&
2078 		(bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_dma64map)))
2079 		break;
2080 	    amr_releasecmd(ac);
2081 	    if (++nextslot > sc->amr_maxio)
2082 		break;
2083 	}
2084 	sc->amr_nextslot = nextslot;
2085     }
2086 }
2087 
2088 /********************************************************************************
2089  * Free a command cluster
2090  */
2091 static void
2092 amr_freecmd_cluster(struct amr_command_cluster *acc)
2093 {
2094     struct amr_softc	*sc = acc->acc_command[0].ac_sc;
2095     int			i;
2096 
2097     for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2098 	if (acc->acc_command[i].ac_sc == NULL)
2099 	    break;
2100 	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2101 	if (AMR_IS_SG64(sc))
2102 		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2103     }
2104     kfree(acc, M_AMR);
2105 }
2106 
2107 /********************************************************************************
2108  ********************************************************************************
2109                                                          Interface-specific Shims
2110  ********************************************************************************
2111  ********************************************************************************/
2112 
2113 /********************************************************************************
2114  * Tell the controller that the mailbox contains a valid command
2115  */
2116 static int
2117 amr_quartz_submit_command(struct amr_command *ac)
2118 {
2119     struct amr_softc	*sc = ac->ac_sc;
2120     static struct timeval lastfail;
2121     static int		curfail;
2122     int			i = 0;
2123 
2124     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2125     while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2126         DELAY(1);
2127 	/* This is a no-op read that flushes pending mailbox updates */
2128 	AMR_QGET_ODB(sc);
2129     }
2130     if (sc->amr_mailbox->mb_busy) {
2131 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2132 	if (ac->ac_retries++ > 1000) {
2133 	    if (ppsratecheck(&lastfail, &curfail, 1))
2134 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2135 			      "Controller is likely dead\n", ac);
2136 	    ac->ac_retries = 0;
2137 	}
2138 	return (EBUSY);
2139     }
2140 
2141     /*
2142      * Save the slot number so that we can locate this command when complete.
2143      * Note that ident = 0 seems to be special, so we don't use it.
2144      */
2145     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into the mailbox */
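    /*
     * Copy only the command portion of the mailbox; the handshake bytes
     * (busy/poll/ack) that follow are set explicitly below.
     */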
2146     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2147     sc->amr_mailbox->mb_busy = 1;
2148     sc->amr_mailbox->mb_poll = 0;
2149     sc->amr_mailbox->mb_ack  = 0;
2150     sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2151     sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2152 
2153     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2154     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2155     return(0);
2156 }
2157 
2158 static int
2159 amr_std_submit_command(struct amr_command *ac)
2160 {
2161     struct amr_softc	*sc = ac->ac_sc;
2162     static struct timeval lastfail;
2163     static int		curfail;
2164 
2165     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2166     if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2167 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2168 	if (ac->ac_retries++ > 1000) {
2169 	    if (ppsratecheck(&lastfail, &curfail, 1))
2170 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2171 			      "Controller is likely dead\n", ac);
2172 	    ac->ac_retries = 0;
2173 	}
2174 	return (EBUSY);
2175     }
2176 
2177     /*
2178      * Save the slot number so that we can locate this command when complete.
2179      * Note that ident = 0 seems to be special, so we don't use it.
2180      */
2181     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into the mailbox */
2182     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2183     sc->amr_mailbox->mb_busy = 1;
2184     sc->amr_mailbox->mb_poll = 0;
2185     sc->amr_mailbox->mb_ack  = 0;
2186 
2187     AMR_SPOST_COMMAND(sc);
2188     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2189     return(0);
2190 }
2191 
2192 /********************************************************************************
2193  * Claim any work that the controller has completed; acknowledge completion,
2194  * save details of the completion in (mbsave)
2195  */
2196 static int
2197 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2198 {
2199     int		worked, i;
2200     u_int32_t	outd;
2201     u_int8_t	nstatus;
2202     u_int8_t	completed[46];
2203 
2204     debug_called(3);
2205 
2206     worked = 0;
2207 
2208     /* work waiting for us? */
2209     if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2210 
2211 	/* acknowledge interrupt */
2212 	AMR_QPUT_ODB(sc, AMR_QODB_READY);
2213 
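	/*
	 * 0xff marks a mailbox field the firmware has not yet written; spin
	 * until each field is filled in, then write 0xff back to re-arm it
	 * for the next completion.
	 */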
2214 	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2215 	    DELAY(1);
2216 	sc->amr_mailbox->mb_nstatus = 0xff;
2217 
2218 	/* wait until fw wrote out all completions */
2219 	/* wait until the firmware has written out all the completions */
2220 	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2221 		DELAY(1);
2222 	    sc->amr_mailbox->mb_completed[i] = 0xff;
2223 	}
2224 
2225 	/* Save information for later processing */
2226 	mbsave->mb_nstatus = nstatus;
2227 	mbsave->mb_status = sc->amr_mailbox->mb_status;
2228 	sc->amr_mailbox->mb_status = 0xff;
2229 
2230 	for (i = 0; i < nstatus; i++)
2231 	    mbsave->mb_completed[i] = completed[i];
2232 
2233 	/* acknowledge that we have the commands */
2234 	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2235 
2236 #if 0
2237 #ifndef AMR_QUARTZ_GOFASTER
2238 	/*
2239 	 * This waits for the controller to notice that we've taken the
2240 	 * command from it.  It's very inefficient, and we shouldn't do it,
2241 	 * but if we remove this code, we stop completing commands under
2242 	 * load.
2243 	 *
2244 	 * Peter J says we shouldn't do this.  The documentation says we
2245 	 * should.  Who is right?
2246 	 */
2247 	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2248 	    ;				/* XXX aiee! what if it dies? */
2249 #endif
2250 #endif
2251 
2252 	worked = 1;			/* got some work */
2253     }
2254 
2255     return(worked);
2256 }
2257 
2258 static int
2259 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2260 {
2261     int		worked;
2262     u_int8_t	istat;
2263 
2264     debug_called(3);
2265 
2266     worked = 0;
2267 
2268     /* check for valid interrupt status */
2269     istat = AMR_SGET_ISTAT(sc);
2270     if ((istat & AMR_SINTR_VALID) != 0) {
2271 	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */
2272 
2273 	/* save mailbox, which contains a list of completed commands */
2274 	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2275 
2276 	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
2277 	worked = 1;
2278     }
2279 
2280     return(worked);
2281 }
2282 
2283 /********************************************************************************
2284  * Notify the controller of the mailbox location.
2285  */
2286 static void
2287 amr_std_attach_mailbox(struct amr_softc *sc)
2288 {
2289 
2290     /* program the mailbox physical address */
2291     AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
2292     AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
2293     AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2294     AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2295     AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2296 
2297     /* clear any outstanding interrupt and enable interrupts proper */
2298     AMR_SACK_INTERRUPT(sc);
2299     AMR_SENABLE_INTR(sc);
2300 }
2301 
2302 #ifdef AMR_BOARD_INIT
2303 /********************************************************************************
2304  * Initialise the controller
2305  */
2306 static int
2307 amr_quartz_init(struct amr_softc *sc)
2308 {
2309     int		status, ostatus;
2310 
2311     device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2312 
2313     AMR_QRESET(sc);
2314 
2315     ostatus = 0xff;
2316     while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2317 	if (status != ostatus) {
2318 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2319 	    ostatus = status;
2320 	}
2321 	switch (status) {
2322 	case AMR_QINIT_NOMEM:
2323 	    return(ENOMEM);
2324 
2325 	case AMR_QINIT_SCAN:
2326 	    /* XXX we could print channel/target here */
2327 	    break;
2328 	}
2329     }
2330     return(0);
2331 }
2332 
2333 static int
2334 amr_std_init(struct amr_softc *sc)
2335 {
2336     int		status, ostatus;
2337 
2338     device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2339 
2340     AMR_SRESET(sc);
2341 
2342     ostatus = 0xff;
2343     while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2344 	if (status != ostatus) {
2345 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2346 	    ostatus = status;
2347 	}
2348 	switch (status) {
2349 	case AMR_SINIT_NOMEM:
2350 	    return(ENOMEM);
2351 
2352 	case AMR_SINIT_INPROG:
2353 	    /* XXX we could print channel/target here? */
2354 	    break;
2355 	}
2356     }
2357     return(0);
2358 }
2359 #endif
2360 
2361 /********************************************************************************
2362  ********************************************************************************
2363                                                                         Debugging
2364  ********************************************************************************
2365  ********************************************************************************/
2366 
2367 /********************************************************************************
2368  * Identify the controller and print some information about it.
2369  */
2370 static void
2371 amr_describe_controller(struct amr_softc *sc)
2372 {
2373     struct amr_prodinfo	*ap;
2374     struct amr_enquiry	*ae;
2375     char		*prod;
2376     int			status;
2377 
2378     /*
2379      * Try to get 40LD product info, which tells us what the card is labelled as.
2380      */
2381     if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2382 	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2383 		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
2384 		      ap->ap_memsize);
2385 
2386 	kfree(ap, M_AMR);
2387 	return;
2388     }
2389 
2390     /*
2391      * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2392      */
2393     if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2394 	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2395 
2396     } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2397 
2398 	/*
2399 	 * Try to work it out based on the PCI signatures.
2400 	 */
2401 	switch (pci_get_device(sc->amr_dev)) {
2402 	case 0x9010:
2403 	    prod = "Series 428";
2404 	    break;
2405 	case 0x9060:
2406 	    prod = "Series 434";
2407 	    break;
2408 	default:
2409 	    prod = "unknown controller";
2410 	    break;
2411 	}
2412     } else {
2413 	device_printf(sc->amr_dev, "<unsupported controller>\n");
2414 	return;
2415     }
2416 
2417     /*
2418      * HP NetRaid controllers have a special encoding of the firmware and
2419      * BIOS versions. The AMI version seems to have it as strings whereas
2420      * the HP version does it with a leading uppercase character and two
2421      * binary numbers.
2422      */
2423 
2424     if (ae->ae_adapter.aa_firmware[2] >= 'A' &&
2425        ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2426        ae->ae_adapter.aa_firmware[1] <  ' ' &&
2427        ae->ae_adapter.aa_firmware[0] <  ' ' &&
2428        ae->ae_adapter.aa_bios[2] >= 'A'     &&
2429        ae->ae_adapter.aa_bios[2] <= 'Z'     &&
2430        ae->ae_adapter.aa_bios[1] <  ' '     &&
2431        ae->ae_adapter.aa_bios[0] <  ' ') {
2432 
2433 	/* this looks like we have an HP NetRaid version of the MegaRaid */
2434 
2435 	if (ae->ae_signature == AMR_SIG_438) {
2436 		/* the AMI 438 is a NetRaid 3si in HP-land */
2437 		prod = "HP NetRaid 3si";
2438 	}
2439 
2440 	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2441 		      prod, ae->ae_adapter.aa_firmware[2],
2442 		      ae->ae_adapter.aa_firmware[1],
2443 		      ae->ae_adapter.aa_firmware[0],
2444 		      ae->ae_adapter.aa_bios[2],
2445 		      ae->ae_adapter.aa_bios[1],
2446 		      ae->ae_adapter.aa_bios[0],
2447 		      ae->ae_adapter.aa_memorysize);
2448     } else {
2449 	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2450 		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2451 		      ae->ae_adapter.aa_memorysize);
2452     }
2453     kfree(ae, M_AMR);
2454 }
2455 
2456 int
2457 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2458 {
2459     struct amr_command	*ac;
2460     int			error = EIO;
2461 
2462     debug_called(1);
2463 
2464     sc->amr_state |= AMR_STATE_INTEN;
2465 
2466     /* get ourselves a command buffer */
2467     if ((ac = amr_alloccmd(sc)) == NULL)
2468 	goto out;
2469     /* set command flags */
2470     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2471 
2472     /* point the command at our data */
2473     ac->ac_data = data;
2474     ac->ac_length = blks * AMR_BLKSIZE;
2475 
2476     /* build the command proper */
2477     ac->ac_mailbox.mb_command 	= AMR_CMD_LWRITE;
2478     ac->ac_mailbox.mb_blkcount	= blks;
2479     ac->ac_mailbox.mb_lba	= lba;
2480     ac->ac_mailbox.mb_drive	= unit;
2481 
2482     /* can't assume that interrupts are going to work here, so play it safe */
2483     if (sc->amr_poll_command(ac))
2484 	goto out;
2485     error = ac->ac_status;
2486 
2487  out:
2488     if (ac != NULL)
2489 	amr_releasecmd(ac);
2490 
2491     sc->amr_state &= ~AMR_STATE_INTEN;
2492     return (error);
2493 }
2494 
2495 
2496 
2497 #ifdef AMR_DEBUG
2498 /********************************************************************************
2499  * Print the command (ac) in human-readable format
2500  */
2501 #if 0
2502 static void
2503 amr_printcommand(struct amr_command *ac)
2504 {
2505     struct amr_softc	*sc = ac->ac_sc;
2506     struct amr_sgentry	*sg;
2507     int			i;
2508 
2509     device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
2510 		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2511     device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
2512 		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2513     device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2514     device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
2515 		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2516     device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2517 
2518     /* get base address of s/g table */
2519     sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2520     for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2521 	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
2522 }
2523 #endif
2524 #endif
2525