xref: /dragonfly/sys/dev/raid/amr/amr.c (revision 0720b42f)
1 /*-
2  * Copyright (c) 1999,2000 Michael Smith
3  * Copyright (c) 2000 BSDi
4  * Copyright (c) 2005 Scott Long
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002 Eric Moore
30  * Copyright (c) 2002, 2004 LSI Logic Corporation
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  * 3. The party using or redistributing the source code and binary forms
42  *    agrees to the disclaimer below and the terms and conditions set forth
43  *    herein.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55  * SUCH DAMAGE.
56  *
57  * $FreeBSD: src/sys/dev/amr/amr.c,v 1.99 2012/08/31 09:42:46 scottl Exp $
58  */
59 
60 /*
61  * Driver for the AMI MegaRaid family of controllers.
62  */
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/proc.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysmsg.h>
71 
72 #include <sys/bio.h>
73 #include <sys/bus.h>
74 #include <sys/conf.h>
75 #include <sys/stat.h>
76 
77 #include <machine/cpu.h>
78 #include <sys/rman.h>
79 
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
82 
83 #include <dev/raid/amr/amrio.h>
84 #include <dev/raid/amr/amrreg.h>
85 #include <dev/raid/amr/amrvar.h>
86 #define AMR_DEFINE_TABLES
87 #include <dev/raid/amr/amr_tables.h>
88 
89 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
90 
91 static d_open_t         amr_open;
92 static d_close_t        amr_close;
93 static d_ioctl_t        amr_ioctl;
94 
95 static struct dev_ops amr_ops = {
96 	{ "amr", 0, 0 },
97 	.d_open =	amr_open,
98 	.d_close =	amr_close,
99 	.d_ioctl =	amr_ioctl,
100 };
101 
102 int linux_no_adapter = 0;
103 /*
104  * Initialisation, bus interface.
105  */
106 static void	amr_startup(void *arg);
107 
108 /*
109  * Command wrappers
110  */
111 static int	amr_query_controller(struct amr_softc *sc);
112 static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
113 			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
114 static void	amr_completeio(struct amr_command *ac);
115 static int	amr_support_ext_cdb(struct amr_softc *sc);
116 
117 /*
118  * Command buffer allocation.
119  */
120 static void	amr_alloccmd_cluster(struct amr_softc *sc);
121 static void	amr_freecmd_cluster(struct amr_command_cluster *acc);
122 
123 /*
124  * Command processing.
125  */
126 static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
127 static int	amr_wait_command(struct amr_command *ac);
128 static int	amr_mapcmd(struct amr_command *ac);
129 static void	amr_unmapcmd(struct amr_command *ac);
130 static int	amr_start(struct amr_command *ac);
131 static void	amr_complete(void *context, ac_qhead_t *head);
132 static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
133 static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
134 static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
135 static void	amr_abort_load(struct amr_command *ac);
136 
137 /*
138  * Interface-specific shims
139  */
140 static int	amr_quartz_submit_command(struct amr_command *ac);
141 static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
142 static int	amr_quartz_poll_command(struct amr_command *ac);
143 static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
144 
145 static int	amr_std_submit_command(struct amr_command *ac);
146 static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
147 static int	amr_std_poll_command(struct amr_command *ac);
148 static void	amr_std_attach_mailbox(struct amr_softc *sc);
149 
150 #ifdef AMR_BOARD_INIT
151 static int	amr_quartz_init(struct amr_softc *sc);
152 static int	amr_std_init(struct amr_softc *sc);
153 #endif
154 
155 /*
156  * Debugging
157  */
158 static void	amr_describe_controller(struct amr_softc *sc);
159 #ifdef AMR_DEBUG
160 #if 0
161 static void	amr_printcommand(struct amr_command *ac);
162 #endif
163 #endif
164 
165 static void	amr_init_sysctl(struct amr_softc *sc);
166 static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
167 		    int32_t flag, struct sysmsg *sm);
168 
169 static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
170 
171 /********************************************************************************
172  ********************************************************************************
173                                                                       Inline Glue
174  ********************************************************************************
175  ********************************************************************************/
176 
177 /********************************************************************************
178  ********************************************************************************
179                                                                 Public Interfaces
180  ********************************************************************************
181  ********************************************************************************/
182 
183 /********************************************************************************
184  * Initialise the controller and softc.
185  */
186 int
187 amr_attach(struct amr_softc *sc)
188 {
189     device_t child;
190 
191     debug_called(1);
192 
193     /*
194      * Initialise per-controller queues.
195      */
196     amr_init_qhead(&sc->amr_freecmds);
197     amr_init_qhead(&sc->amr_ready);
198     TAILQ_INIT(&sc->amr_cmd_clusters);
199     bioq_init(&sc->amr_bioq);
200 
201     debug(2, "queue init done");
202 
203     /*
204      * Configure for this controller type.
205      */
206     if (AMR_IS_QUARTZ(sc)) {
207 	sc->amr_submit_command = amr_quartz_submit_command;
208 	sc->amr_get_work       = amr_quartz_get_work;
209 	sc->amr_poll_command   = amr_quartz_poll_command;
210 	sc->amr_poll_command1  = amr_quartz_poll_command1;
211     } else {
212 	sc->amr_submit_command = amr_std_submit_command;
213 	sc->amr_get_work       = amr_std_get_work;
214 	sc->amr_poll_command   = amr_std_poll_command;
215 	amr_std_attach_mailbox(sc);
216     }
217 
218 #ifdef AMR_BOARD_INIT
219     if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
220 	return(ENXIO);
221 #endif
222 
223     /*
224      * Allocate initial commands.
225      */
226     amr_alloccmd_cluster(sc);
227 
228     /*
229      * Quiz controller for features and limits.
230      */
231     if (amr_query_controller(sc))
232 	return(ENXIO);
233 
234     debug(2, "controller query complete");
235 
236     /*
237      * preallocate the remaining commands.
238      */
239     while (sc->amr_nextslot < sc->amr_maxio)
240 	amr_alloccmd_cluster(sc);
241 
242     /*
243      * Setup sysctls.
244      */
245     amr_init_sysctl(sc);
246 
247     /*
248      * Attach our 'real' SCSI channels to CAM.
249      */
250     child = device_add_child(sc->amr_dev, "amrp", -1);
251     sc->amr_pass = child;
252     if (child != NULL) {
253 	device_set_softc(child, sc);
254 	device_set_desc(child, "SCSI Passthrough Bus");
255 	bus_generic_attach(sc->amr_dev);
256     }
257 
258     /*
259      * Create the control device.
260      */
261     sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
262 			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
263     sc->amr_dev_t->si_drv1 = sc;
264     linux_no_adapter++;
265     if (device_get_unit(sc->amr_dev) == 0)
266 	make_dev_alias(sc->amr_dev_t, "megadev0");
267 
268     /*
269      * Schedule ourselves to bring the controller up once interrupts are
270      * available.
271      */
272     bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
273     sc->amr_ich.ich_func = amr_startup;
274     sc->amr_ich.ich_arg = sc;
275     sc->amr_ich.ich_desc = "amr";
276     if (config_intrhook_establish(&sc->amr_ich) != 0) {
277 	device_printf(sc->amr_dev, "can't establish configuration hook\n");
278 	return(ENOMEM);
279     }
280 
281     /*
282      * Print a little information about the controller.
283      */
284     amr_describe_controller(sc);
285 
286     debug(2, "attach complete");
287     return(0);
288 }
289 
290 /********************************************************************************
291  * Locate disk resources and attach children to them.
292  */
293 static void
294 amr_startup(void *arg)
295 {
296     struct amr_softc	*sc = (struct amr_softc *)arg;
297     struct amr_logdrive	*dr;
298     int			i, error;
299 
300     debug_called(1);
301 
302     /* pull ourselves off the intrhook chain */
303     if (sc->amr_ich.ich_func)
304 	config_intrhook_disestablish(&sc->amr_ich);
305     sc->amr_ich.ich_func = NULL;
306 
307     /* get up-to-date drive information */
308     if (amr_query_controller(sc)) {
309 	device_printf(sc->amr_dev, "can't scan controller for drives\n");
310 	return;
311     }
312 
313     /* iterate over available drives */
314     for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
315 	/* are we already attached to this drive? */
316 	if (dr->al_disk == 0) {
317 	    /* generate geometry information */
318 	    if (dr->al_size > 0x200000) {	/* extended translation? */
319 		dr->al_heads = 255;
320 		dr->al_sectors = 63;
321 	    } else {
322 		dr->al_heads = 64;
323 		dr->al_sectors = 32;
324 	    }
325 	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
326 
327 	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
328 	    if (dr->al_disk == 0)
329 		device_printf(sc->amr_dev, "device_add_child failed\n");
330 	    device_set_ivars(dr->al_disk, dr);
331 	}
332     }
333 
334     if ((error = bus_generic_attach(sc->amr_dev)) != 0)
335 	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
336 
337     /* mark controller back up */
338     sc->amr_state &= ~AMR_STATE_SHUTDOWN;
339 
340     /* interrupts will be enabled before we do anything more */
341     sc->amr_state |= AMR_STATE_INTEN;
342 
343     return;
344 }
345 
346 static void
347 amr_init_sysctl(struct amr_softc *sc)
348 {
349     struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->amr_dev);
350     struct sysctl_oid *tree = device_get_sysctl_tree(sc->amr_dev);
351 
352     SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
353 	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
354 	"");
355     SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
356 	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
357 	"");
358     SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
359 	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
360 	"");
361     SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
362 	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
363 	"");
364 }
365 
366 
367 /*******************************************************************************
368  * Free resources associated with a controller instance
369  */
370 void
371 amr_free(struct amr_softc *sc)
372 {
373     struct amr_command_cluster	*acc;
374 
375     /* detach from CAM */
376     if (sc->amr_pass != NULL)
377 	device_delete_child(sc->amr_dev, sc->amr_pass);
378 
379     /* throw away any command buffers */
380     while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
381 	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
382 	amr_freecmd_cluster(acc);
383     }
384 
385     /* destroy control device */
386     if(sc->amr_dev_t != NULL)
387 	    destroy_dev(sc->amr_dev_t);
388     dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));
389 
390 #if 0 /* XXX swildner */
391     if (mtx_initialized(&sc->amr_hw_lock))
392 	mtx_destroy(&sc->amr_hw_lock);
393 
394     if (mtx_initialized(&sc->amr_list_lock))
395 	mtx_destroy(&sc->amr_list_lock);
396 #endif
397 
398     lockuninit(&sc->amr_hw_lock);
399     lockuninit(&sc->amr_list_lock);
400 }
401 
402 /*******************************************************************************
403  * Receive a bio structure from a child device and queue it on a particular
404  * disk resource, then poke the disk resource to start as much work as it can.
405  */
406 int
407 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
408 {
409     debug_called(2);
410 
411     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
412     amr_enqueue_bio(sc, bio);
413     amr_startio(sc);
414     lockmgr(&sc->amr_list_lock, LK_RELEASE);
415     return(0);
416 }
417 
418 /********************************************************************************
419  * Accept an open operation on the control device.
420  */
421 static int
422 amr_open(struct dev_open_args *ap)
423 {
424     cdev_t		dev = ap->a_head.a_dev;
425     int			unit = minor(dev);
426     struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
427 
428     debug_called(1);
429 
430     sc->amr_state |= AMR_STATE_OPEN;
431     return(0);
432 }
433 
434 /********************************************************************************
435  * Accept the last close on the control device.
436  */
437 static int
438 amr_close(struct dev_close_args *ap)
439 {
440     cdev_t		dev = ap->a_head.a_dev;
441     int			unit = minor(dev);
442     struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
443 
444     debug_called(1);
445 
446     sc->amr_state &= ~AMR_STATE_OPEN;
447     return (0);
448 }
449 
450 /********************************************************************************
451  * Handle controller-specific control operations.
452  */
453 static void
454 amr_rescan_drives(struct cdev *dev)
455 {
456     struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
457     int			i, error = 0;
458 
459     sc->amr_state |= AMR_STATE_REMAP_LD;
460     while (sc->amr_busyslots) {
461 	device_printf(sc->amr_dev, "idle controller\n");
462 	amr_done(sc);
463     }
464 
465     /* mark ourselves as in-shutdown */
466     sc->amr_state |= AMR_STATE_SHUTDOWN;
467 
468     /* flush controller */
469     device_printf(sc->amr_dev, "flushing cache...");
470     kprintf("%s\n", amr_flush(sc) ? "failed" : "done");
471 
472     /* delete all our child devices */
473     for(i = 0 ; i < AMR_MAXLD; i++) {
474 	if(sc->amr_drive[i].al_disk != 0) {
475 	    if((error = device_delete_child(sc->amr_dev,
476 		sc->amr_drive[i].al_disk)) != 0)
477 		goto shutdown_out;
478 
479 	     sc->amr_drive[i].al_disk = 0;
480 	}
481     }
482 
483 shutdown_out:
484     amr_startup(sc);
485 }
486 
487 /*
488  * Bug-for-bug compatibility with Linux!
489  * Some apps will send commands with inlen and outlen set to 0,
490  * even though they expect data to be transfered to them from the
491  * card.  Linux accidentally allows this by allocating a 4KB
492  * buffer for the transfer anyways, but it then throws it away
493  * without copying it back to the app.
494  *
495  * The amr(4) firmware relies on this feature.  In fact, it assumes
496  * the buffer is always a power of 2 up to a max of 64k.  There is
497  * also at least one case where it assumes a buffer less than 16k is
498  * greater than 16k.  Force a minimum buffer size of 32k and round
499  * sizes between 32k and 64k up to 64k as a workaround.
500  */
501 static unsigned long
502 amr_ioctl_buffer_length(unsigned long len)
503 {
504 
505     if (len <= 32 * 1024)
506 	return (32 * 1024);
507     if (len <= 64 * 1024)
508 	return (64 * 1024);
509     return (len);
510 }
511 
512 int
513 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
514     struct sysmsg *sm)
515 {
516     struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
517     struct amr_command		*ac;
518     struct amr_mailbox		*mb;
519     struct amr_linux_ioctl	ali;
520     void			*dp, *temp;
521     int				error;
522     int				len, ac_flags = 0;
523     int				logical_drives_changed = 0;
524     u_int32_t			linux_version = 0x02100000;
525     u_int8_t			status;
526     struct amr_passthrough	*ap;	/* 60 bytes */
527 
528     error = 0;
529     dp = NULL;
530     ac = NULL;
531     ap = NULL;
532 
533     if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
534 	return (error);
535     switch (ali.ui.fcs.opcode) {
536     case 0x82:
537 	switch(ali.ui.fcs.subopcode) {
538 	case 'e':
539 	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
540 		sizeof(linux_version));
541 	    error = 0;
542 	    break;
543 
544 	case 'm':
545 	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
546 		sizeof(linux_no_adapter));
547 	    sm->sm_result.iresult = linux_no_adapter;
548 	    error = 0;
549 	    break;
550 
551 	default:
552 	    kprintf("Unknown subopcode\n");
553 	    error = ENOIOCTL;
554 	    break;
555 	}
556     break;
557 
558     case 0x80:
559     case 0x81:
560 	if (ali.ui.fcs.opcode == 0x80)
561 	    len = max(ali.outlen, ali.inlen);
562 	else
563 	    len = ali.ui.fcs.length;
564 
565 	mb = (void *)&ali.mbox[0];
566 
567 	if ((ali.mbox[0] == FC_DEL_LOGDRV  && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
568 	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
569 	    if (sc->amr_allow_vol_config == 0) {
570 		error = EPERM;
571 		break;
572 	    }
573 	    logical_drives_changed = 1;
574 	}
575 
576 	if (ali.mbox[0] == AMR_CMD_PASS) {
577 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
578 	    while ((ac = amr_alloccmd(sc)) == NULL)
579 		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
580 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
581 	    ap = &ac->ac_ccb->ccb_pthru;
582 
583 	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
584 		sizeof(struct amr_passthrough));
585 	    if (error)
586 		break;
587 
588 	    if (ap->ap_data_transfer_length)
589 		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
590 		    M_WAITOK | M_ZERO);
591 
592 	    if (ali.inlen) {
593 		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
594 		    dp, ap->ap_data_transfer_length);
595 		if (error)
596 		    break;
597 	    }
598 
599 	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
600 	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
601 	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
602 	    ac->ac_flags = ac_flags;
603 
604 	    ac->ac_data = dp;
605 	    ac->ac_length = ap->ap_data_transfer_length;
606 	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
607 
608 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
609 	    error = amr_wait_command(ac);
610 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
611 	    if (error)
612 		break;
613 
614 	    status = ac->ac_status;
615 	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
616 	    if (error)
617 		break;
618 
619 	    if (ali.outlen) {
620 		error = copyout(dp, temp, ap->ap_data_transfer_length);
621 	        if (error)
622 		    break;
623 	    }
624 	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
625 	    if (error)
626 		break;
627 
628 	    error = 0;
629 	    break;
630 	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
631 	    kprintf("No AMR_CMD_PASS_64\n");
632 	    error = ENOIOCTL;
633 	    break;
634 	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
635 	    kprintf("No AMR_CMD_EXTPASS\n");
636 	    error = ENOIOCTL;
637 	    break;
638 	} else {
639 	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
640 
641 	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);
642 
643 	    if (ali.inlen) {
644 		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
645 		if (error)
646 		    break;
647 	    }
648 
649 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
650 	    while ((ac = amr_alloccmd(sc)) == NULL)
651 		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
652 
653 	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
654 	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
655 	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
656 
657 	    ac->ac_length = len;
658 	    ac->ac_data = dp;
659 	    ac->ac_flags = ac_flags;
660 
661 	    error = amr_wait_command(ac);
662 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
663 	    if (error)
664 		break;
665 
666 	    status = ac->ac_status;
667 	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
668 	    if (ali.outlen) {
669 		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
670 		if (error)
671 		    break;
672 	    }
673 
674 	    error = 0;
675 	    if (logical_drives_changed)
676 		amr_rescan_drives(dev);
677 	    break;
678 	}
679 	break;
680 
681     default:
682 	debug(1, "unknown linux ioctl 0x%lx", cmd);
683 	kprintf("unknown linux ioctl 0x%lx\n", cmd);
684 	error = ENOIOCTL;
685 	break;
686     }
687 
688     /*
689      * At this point, we know that there is a lock held and that these
690      * objects have been allocated.
691      */
692     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
693     if (ac != NULL)
694 	amr_releasecmd(ac);
695     lockmgr(&sc->amr_list_lock, LK_RELEASE);
696     if (dp != NULL)
697 	kfree(dp, M_AMR);
698     return(error);
699 }
700 
701 static int
702 amr_ioctl(struct dev_ioctl_args *ap)
703 {
704     cdev_t			dev = ap->a_head.a_dev;
705     caddr_t			addr = ap->a_data;
706     u_long			cmd = ap->a_cmd;
707     struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
708     union {
709 	void			*_p;
710 	struct amr_user_ioctl	*au;
711 #ifdef AMR_IO_COMMAND32
712 	struct amr_user_ioctl32	*au32;
713 #endif
714 	int			*result;
715     } arg;
716     struct amr_command		*ac;
717     struct amr_mailbox_ioctl	*mbi;
718     void			*dp, *au_buffer;
719     unsigned long		au_length, real_length;
720     unsigned char		*au_cmd;
721     int				*au_statusp;
722     int				error;
723     struct amr_passthrough	*_ap;	/* 60 bytes */
724     int				logical_drives_changed = 0;
725 
726     debug_called(1);
727 
728     arg._p = (void *)addr;
729 
730     error = 0;
731     dp = NULL;
732     ac = NULL;
733     _ap = NULL;
734 
735     switch(cmd) {
736 
737     case AMR_IO_VERSION:
738 	debug(1, "AMR_IO_VERSION");
739 	*arg.result = AMR_IO_VERSION_NUMBER;
740 	return(0);
741 
742 #ifdef AMR_IO_COMMAND32
743     /*
744      * Accept ioctl-s from 32-bit binaries on non-32-bit
745      * platforms, such as AMD. LSI's MEGAMGR utility is
746      * the only example known today...	-mi
747      */
748     case AMR_IO_COMMAND32:
749 	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
750 	au_cmd = arg.au32->au_cmd;
751 	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
752 	au_length = arg.au32->au_length;
753 	au_statusp = &arg.au32->au_status;
754 	break;
755 #endif
756 
757     case AMR_IO_COMMAND:
758 	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
759 	au_cmd = arg.au->au_cmd;
760 	au_buffer = (void *)arg.au->au_buffer;
761 	au_length = arg.au->au_length;
762 	au_statusp = &arg.au->au_status;
763 	break;
764 
765     case 0xc0046d00:
766     case 0xc06e6d00:	/* Linux emulation */
767 	{
768 	    devclass_t			devclass;
769 	    struct amr_linux_ioctl	ali;
770 	    int				adapter, error;
771 
772 	    devclass = devclass_find("amr");
773 	    if (devclass == NULL)
774 		return (ENOENT);
775 
776 	    error = copyin(addr, &ali, sizeof(ali));
777 	    if (error)
778 		return (error);
779 	    if (ali.ui.fcs.opcode == 0x82)
780 		adapter = 0;
781 	    else
782 		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
783 
784 	    sc = devclass_get_softc(devclass, adapter);
785 	    if (sc == NULL)
786 		return (ENOENT);
787 
788 	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
789 	}
790     default:
791 	debug(1, "unknown ioctl 0x%lx", cmd);
792 	return(ENOIOCTL);
793     }
794 
795     if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
796 	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
797 	if (sc->amr_allow_vol_config == 0) {
798 	    error = EPERM;
799 	    goto out;
800 	}
801 	logical_drives_changed = 1;
802     }
803 
804     /* handle inbound data buffer */
805     real_length = amr_ioctl_buffer_length(au_length);
806     if (au_length != 0 && au_cmd[0] != 0x06) {
807 	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
808 	    error = ENOMEM;
809 	    goto out;
810 	}
811 	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
812 	    kfree(dp, M_AMR);
813 	    return (error);
814 	}
815 	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
816     }
817 
818     /* Allocate this now before the mutex gets held */
819 
820     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
821     while ((ac = amr_alloccmd(sc)) == NULL)
822 	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
823 
824     /* handle SCSI passthrough command */
825     if (au_cmd[0] == AMR_CMD_PASS) {
826         int len;
827 
828 	_ap = &ac->ac_ccb->ccb_pthru;
829 	bzero(_ap, sizeof(struct amr_passthrough));
830 
831 	/* copy cdb */
832         len = au_cmd[2];
833 	_ap->ap_cdb_length = len;
834 	bcopy(au_cmd + 3, _ap->ap_cdb, len);
835 
836 	/* build passthrough */
837 	_ap->ap_timeout		= au_cmd[len + 3] & 0x07;
838 	_ap->ap_ars		= (au_cmd[len + 3] & 0x08) ? 1 : 0;
839 	_ap->ap_islogical	= (au_cmd[len + 3] & 0x80) ? 1 : 0;
840 	_ap->ap_logical_drive_no = au_cmd[len + 4];
841 	_ap->ap_channel		= au_cmd[len + 5];
842 	_ap->ap_scsi_id 	= au_cmd[len + 6];
843 	_ap->ap_request_sense_length	= 14;
844 	_ap->ap_data_transfer_length	= au_length;
845 	/* XXX what about the request-sense area? does the caller want it? */
846 
847 	/* build command */
848 	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
849 	ac->ac_flags = AMR_CMD_CCB;
850 
851     } else {
852 	/* direct command to controller */
853 	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
854 
855 	/* copy pertinent mailbox items */
856 	mbi->mb_command = au_cmd[0];
857 	mbi->mb_channel = au_cmd[1];
858 	mbi->mb_param = au_cmd[2];
859 	mbi->mb_pad[0] = au_cmd[3];
860 	mbi->mb_drive = au_cmd[4];
861 	ac->ac_flags = 0;
862     }
863 
864     /* build the command */
865     ac->ac_data = dp;
866     ac->ac_length = real_length;
867     ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
868 
869     /* run the command */
870     error = amr_wait_command(ac);
871     lockmgr(&sc->amr_list_lock, LK_RELEASE);
872     if (error)
873 	goto out;
874 
875     /* copy out data and set status */
876     if (au_length != 0) {
877 	error = copyout(dp, au_buffer, au_length);
878     }
879     debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
880     if (dp != NULL)
881 	debug(2, "%p status 0x%x", dp, ac->ac_status);
882     *au_statusp = ac->ac_status;
883 
884 out:
885     /*
886      * At this point, we know that there is a lock held and that these
887      * objects have been allocated.
888      */
889     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
890     if (ac != NULL)
891 	amr_releasecmd(ac);
892     lockmgr(&sc->amr_list_lock, LK_RELEASE);
893     if (dp != NULL)
894 	kfree(dp, M_AMR);
895 
896     if (logical_drives_changed)
897 	amr_rescan_drives(dev);
898 
899     return(error);
900 }
901 
902 /********************************************************************************
903  ********************************************************************************
904                                                                  Command Wrappers
905  ********************************************************************************
906  ********************************************************************************/
907 
908 /********************************************************************************
909  * Interrogate the controller for the operational parameters we require.
910  */
911 static int
912 amr_query_controller(struct amr_softc *sc)
913 {
914     struct amr_enquiry3	*aex;
915     struct amr_prodinfo	*ap;
916     struct amr_enquiry	*ae;
917     int			ldrv;
918     int			status;
919 
920     /*
921      * Greater than 10 byte cdb support
922      */
923     sc->support_ext_cdb = amr_support_ext_cdb(sc);
924 
925     if(sc->support_ext_cdb) {
926 	debug(2,"supports extended CDBs.");
927     }
928 
929     /*
930      * Try to issue an ENQUIRY3 command
931      */
932     if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
933 			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
934 
935 	/*
936 	 * Fetch current state of logical drives.
937 	 */
938 	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
939 	    sc->amr_drive[ldrv].al_size       = aex->ae_drivesize[ldrv];
940 	    sc->amr_drive[ldrv].al_state      = aex->ae_drivestate[ldrv];
941 	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
942 	    debug(2, "  drive %d: %d state %x properties %x", ldrv, sc->amr_drive[ldrv].al_size,
943 		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
944 	}
945 	kfree(aex, M_AMR);
946 
947 	/*
948 	 * Get product info for channel count.
949 	 */
950 	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
951 	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
952 	    return(1);
953 	}
954 	sc->amr_maxdrives = 40;
955 	sc->amr_maxchan = ap->ap_nschan;
956 	sc->amr_maxio = ap->ap_maxio;
957 	sc->amr_type |= AMR_TYPE_40LD;
958 	kfree(ap, M_AMR);
959 
960 	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
961 	if (ap != NULL)
962 	    kfree(ap, M_AMR);
963 	if (!status) {
964 	    sc->amr_ld_del_supported = 1;
965 	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
966 	}
967     } else {
968 
969 	/* failed, try the 8LD ENQUIRY commands */
970 	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
971 	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
972 		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
973 		return(1);
974 	    }
975 	    ae->ae_signature = 0;
976 	}
977 
978 	/*
979 	 * Fetch current state of logical drives.
980 	 */
981 	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
982 	    sc->amr_drive[ldrv].al_size       = ae->ae_ldrv.al_size[ldrv];
983 	    sc->amr_drive[ldrv].al_state      = ae->ae_ldrv.al_state[ldrv];
984 	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
985 	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
986 		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
987 	}
988 
989 	sc->amr_maxdrives = 8;
990 	sc->amr_maxchan = ae->ae_adapter.aa_channels;
991 	sc->amr_maxio = ae->ae_adapter.aa_maxio;
992 	kfree(ae, M_AMR);
993     }
994 
995     /*
996      * Mark remaining drives as unused.
997      */
998     for (; ldrv < AMR_MAXLD; ldrv++)
999 	sc->amr_drive[ldrv].al_size = 0xffffffff;
1000 
1001     /*
1002      * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
1003      * the controller's reported value, and lockups have been seen when we do.
1004      */
1005     sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1006 
1007     return(0);
1008 }
1009 
1010 /********************************************************************************
1011  * Run a generic enquiry-style command.
1012  */
1013 static void *
1014 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1015 {
1016     struct amr_command	*ac;
1017     void		*result;
1018     u_int8_t		*mbox;
1019     int			error;
1020 
1021     debug_called(1);
1022 
1023     error = 1;
1024     result = NULL;
1025 
1026     /* get ourselves a command buffer */
1027     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1028     ac = amr_alloccmd(sc);
1029     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1030     if (ac == NULL)
1031 	goto out;
1032     /* allocate the response structure */
1033     if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1034 	goto out;
1035     /* set command flags */
1036 
1037     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1038 
1039     /* point the command at our data */
1040     ac->ac_data = result;
1041     ac->ac_length = bufsize;
1042 
1043     /* build the command proper */
1044     mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1045     mbox[0] = cmd;
1046     mbox[2] = cmdsub;
1047     mbox[3] = cmdqual;
1048     *status = 0;
1049 
1050     /* can't assume that interrupts are going to work here, so play it safe */
1051     if (sc->amr_poll_command(ac))
1052 	goto out;
1053     error = ac->ac_status;
1054     *status = ac->ac_status;
1055 
1056  out:
1057     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1058     if (ac != NULL)
1059 	amr_releasecmd(ac);
1060     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1061     if ((error != 0) && (result != NULL)) {
1062 	kfree(result, M_AMR);
1063 	result = NULL;
1064     }
1065     return(result);
1066 }
1067 
1068 /********************************************************************************
1069  * Flush the controller's internal cache, return status.
1070  */
1071 int
1072 amr_flush(struct amr_softc *sc)
1073 {
1074     struct amr_command	*ac;
1075     int			error;
1076 
1077     /* get ourselves a command buffer */
1078     error = 1;
1079     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1080     ac = amr_alloccmd(sc);
1081     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1082     if (ac == NULL)
1083 	goto out;
1084     /* set command flags */
1085     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1086 
1087     /* build the command proper */
1088     ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1089 
1090     /* we have to poll, as the system may be going down or otherwise damaged */
1091     if (sc->amr_poll_command(ac))
1092 	goto out;
1093     error = ac->ac_status;
1094 
1095  out:
1096     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1097     if (ac != NULL)
1098 	amr_releasecmd(ac);
1099     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1100     return(error);
1101 }
1102 
1103 /********************************************************************************
1104  * Detect extented cdb >> greater than 10 byte cdb support
1105  * returns '1' means this support exist
1106  * returns '0' means this support doesn't exist
1107  */
1108 static int
1109 amr_support_ext_cdb(struct amr_softc *sc)
1110 {
1111     struct amr_command	*ac;
1112     u_int8_t		*mbox;
1113     int			error;
1114 
1115     /* get ourselves a command buffer */
1116     error = 0;
1117     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1118     ac = amr_alloccmd(sc);
1119     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1120     if (ac == NULL)
1121 	goto out;
1122     /* set command flags */
1123     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1124 
1125     /* build the command proper */
1126     mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1127     mbox[0] = 0xA4;
1128     mbox[2] = 0x16;
1129 
1130 
1131     /* we have to poll, as the system may be going down or otherwise damaged */
1132     if (sc->amr_poll_command(ac))
1133 	goto out;
1134     if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1135 	    error = 1;
1136     }
1137 
1138 out:
1139     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1140     if (ac != NULL)
1141 	amr_releasecmd(ac);
1142     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1143     return(error);
1144 }
1145 
1146 /********************************************************************************
1147  * Try to find I/O work for the controller from one or more of the work queues.
1148  *
1149  * We make the assumption that if the controller is not ready to take a command
1150  * at some given time, it will generate an interrupt at some later time when
1151  * it is.
1152  */
1153 void
1154 amr_startio(struct amr_softc *sc)
1155 {
1156     struct amr_command	*ac;
1157 
1158     /* spin until something prevents us from doing any work */
1159     for (;;) {
1160 
1161 	/* Don't bother to queue commands no bounce buffers are available. */
1162 	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1163 	    break;
1164 
1165 	/* try to get a ready command */
1166 	ac = amr_dequeue_ready(sc);
1167 
1168 	/* if that failed, build a command from a bio */
1169 	if (ac == NULL)
1170 	    (void)amr_bio_command(sc, &ac);
1171 
1172 	/* if that failed, build a command from a ccb */
1173 	if ((ac == NULL) && (sc->amr_cam_command != NULL))
1174 	    sc->amr_cam_command(sc, &ac);
1175 
1176 	/* if we don't have anything to do, give up */
1177 	if (ac == NULL)
1178 	    break;
1179 
1180 	/* try to give the command to the controller; if this fails save it for later and give up */
1181 	if (amr_start(ac)) {
1182 	    debug(2, "controller busy, command deferred");
1183 	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
1184 	    break;
1185 	}
1186     }
1187 }
1188 
1189 /********************************************************************************
1190  * Handle completion of an I/O command.
1191  */
1192 static void
1193 amr_completeio(struct amr_command *ac)
1194 {
1195     struct amr_softc		*sc = ac->ac_sc;
1196     static struct timeval	lastfail;
1197     static int			curfail;
1198     struct buf			*bp = ac->ac_bio->bio_buf;
1199 
1200     if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
1201 	bp->b_error = EIO;
1202 	bp->b_flags |= B_ERROR;
1203 
1204 	if (ppsratecheck(&lastfail, &curfail, 1))
1205 	    device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
1206 /*	amr_printcommand(ac);*/
1207     }
1208     amrd_intr(ac->ac_bio);
1209     lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
1210     amr_releasecmd(ac);
1211     lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
1212 }
1213 
1214 /********************************************************************************
1215  ********************************************************************************
1216                                                                Command Processing
1217  ********************************************************************************
1218  ********************************************************************************/
1219 
1220 /********************************************************************************
1221  * Convert a bio off the top of the bio queue into a command.
1222  */
1223 static int
1224 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1225 {
1226     struct amr_command	*ac;
1227     struct amrd_softc	*amrd;
1228     struct bio		*bio;
1229     struct buf		*bp;
1230     int			error;
1231     int			blkcount;
1232     int			driveno;
1233     int			cmd;
1234 
1235     ac = NULL;
1236     error = 0;
1237 
1238     /* get a command */
1239     if ((ac = amr_alloccmd(sc)) == NULL)
1240 	return (ENOMEM);
1241 
1242     /* get a bio to work on */
1243     if ((bio = amr_dequeue_bio(sc)) == NULL) {
1244 	amr_releasecmd(ac);
1245 	return (0);
1246     }
1247 
1248     /* connect the bio to the command */
1249     bp = bio->bio_buf;
1250     ac->ac_complete = amr_completeio;
1251     ac->ac_bio = bio;
1252     ac->ac_data = bp->b_data;
1253     ac->ac_length = bp->b_bcount;
1254     cmd = 0;
1255     switch (bp->b_cmd) {
1256     case BUF_CMD_READ:
1257 	ac->ac_flags |= AMR_CMD_DATAIN;
1258 	if (AMR_IS_SG64(sc)) {
1259 	    cmd = AMR_CMD_LREAD64;
1260 	    ac->ac_flags |= AMR_CMD_SG64;
1261 	} else
1262 	    cmd = AMR_CMD_LREAD;
1263 	break;
1264     case BUF_CMD_WRITE:
1265 	ac->ac_flags |= AMR_CMD_DATAOUT;
1266 	if (AMR_IS_SG64(sc)) {
1267 	    cmd = AMR_CMD_LWRITE64;
1268 	    ac->ac_flags |= AMR_CMD_SG64;
1269 	} else
1270 	    cmd = AMR_CMD_LWRITE;
1271 	break;
1272     case BUF_CMD_FLUSH:
1273 	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1274 	cmd = AMR_CMD_FLUSH;
1275 	break;
1276     default:
1277 	panic("Invalid bio command");
1278     }
1279     amrd = (struct amrd_softc *)bio->bio_driver_info;
1280     driveno = amrd->amrd_drive - sc->amr_drive;
1281     blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1282 
1283     ac->ac_mailbox.mb_command = cmd;
1284     if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
1285 	ac->ac_mailbox.mb_blkcount = blkcount;
1286 	ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
1287 	if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
1288 	    device_printf(sc->amr_dev,
1289 			  "I/O beyond end of unit (%lld,%d > %lu)\n",
1290 			  (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
1291 			  (u_long)sc->amr_drive[driveno].al_size);
1292 	}
1293     }
1294     ac->ac_mailbox.mb_drive = driveno;
1295     if (sc->amr_state & AMR_STATE_REMAP_LD)
1296 	ac->ac_mailbox.mb_drive |= 0x80;
1297 
1298     /* we fill in the s/g related data when the command is mapped */
1299 
1300 
1301     *acp = ac;
1302     return(error);
1303 }
1304 
1305 /********************************************************************************
1306  * Take a command, submit it to the controller and sleep until it completes
1307  * or fails.  Interrupts must be enabled, returns nonzero on error.
1308  */
1309 static int
1310 amr_wait_command(struct amr_command *ac)
1311 {
1312     int			error = 0;
1313     struct amr_softc	*sc = ac->ac_sc;
1314 
1315     debug_called(1);
1316 
1317     ac->ac_complete = NULL;
1318     ac->ac_flags |= AMR_CMD_SLEEP;
1319     if ((error = amr_start(ac)) != 0) {
1320 	return(error);
1321     }
1322 
1323     while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1324 	error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0);
1325     }
1326 
1327     return(error);
1328 }
1329 
1330 /********************************************************************************
1331  * Take a command, submit it to the controller and busy-wait for it to return.
1332  * Returns nonzero on error.  Can be safely called with interrupts enabled.
1333  */
1334 static int
1335 amr_std_poll_command(struct amr_command *ac)
1336 {
1337     struct amr_softc	*sc = ac->ac_sc;
1338     int			error, count;
1339 
1340     debug_called(2);
1341 
1342     ac->ac_complete = NULL;
1343     if ((error = amr_start(ac)) != 0)
1344 	return(error);
1345 
1346     count = 0;
1347     do {
1348 	/*
1349 	 * Poll for completion, although the interrupt handler may beat us to it.
1350 	 * Note that the timeout here is somewhat arbitrary.
1351 	 */
1352 	amr_done(sc);
1353 	DELAY(1000);
1354     } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1355     if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1356 	error = 0;
1357     } else {
1358 	/* XXX the slot is now marked permanently busy */
1359 	error = EIO;
1360 	device_printf(sc->amr_dev, "polled command timeout\n");
1361     }
1362     return(error);
1363 }
1364 
1365 static void
1366 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1367 {
1368     struct amr_command *ac = arg;
1369     struct amr_softc *sc = ac->ac_sc;
1370     int mb_channel;
1371 
1372     if (err) {
1373 	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1374 	ac->ac_status = AMR_STATUS_ABORTED;
1375 	return;
1376     }
1377 
1378     amr_setup_sg(arg, segs, nsegs, err);
1379 
1380     /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1381     mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1382     if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1383         ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1384         (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1385 	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1386 
1387     ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1388     ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1389     if (AC_IS_SG64(ac)) {
1390 	ac->ac_sg64_hi = 0;
1391 	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1392     }
1393 
1394     sc->amr_poll_command1(sc, ac);
1395 }
1396 
1397 /********************************************************************************
1398  * Take a command, submit it to the controller and busy-wait for it to return.
1399  * Returns nonzero on error.  Can be safely called with interrupts enabled.
1400  */
1401 static int
1402 amr_quartz_poll_command(struct amr_command *ac)
1403 {
1404     struct amr_softc	*sc = ac->ac_sc;
1405     int			error;
1406 
1407     debug_called(2);
1408 
1409     error = 0;
1410 
1411     if (AC_IS_SG64(ac)) {
1412 	ac->ac_tag = sc->amr_buffer64_dmat;
1413 	ac->ac_datamap = ac->ac_dma64map;
1414     } else {
1415 	ac->ac_tag = sc->amr_buffer_dmat;
1416 	ac->ac_datamap = ac->ac_dmamap;
1417     }
1418 
1419     /* now we have a slot, we can map the command (unmapped in amr_complete) */
1420     if (ac->ac_data != NULL && ac->ac_length != 0) {
1421 	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1422 	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1423 	    error = 1;
1424 	}
1425     } else {
1426 	error = amr_quartz_poll_command1(sc, ac);
1427     }
1428 
1429     return (error);
1430 }
1431 
1432 static int
1433 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1434 {
1435     int count, error;
1436 
1437     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
1438     if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1439 	count=0;
1440 	while (sc->amr_busyslots) {
1441 	    lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
1442 	    if(count++>10) {
1443 		break;
1444 	    }
1445 	}
1446 
1447 	if(sc->amr_busyslots) {
1448 	    device_printf(sc->amr_dev, "adapter is busy\n");
1449 	    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1450 	    if (ac->ac_data != NULL) {
1451 		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1452 	    }
1453 	    ac->ac_status=0;
1454 	    return(1);
1455 	}
1456     }
1457 
1458     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1459 
1460     /* clear the poll/ack fields in the mailbox */
1461     sc->amr_mailbox->mb_ident = 0xFE;
1462     sc->amr_mailbox->mb_nstatus = 0xFF;
1463     sc->amr_mailbox->mb_status = 0xFF;
1464     sc->amr_mailbox->mb_poll = 0;
1465     sc->amr_mailbox->mb_ack = 0;
1466     sc->amr_mailbox->mb_busy = 1;
1467 
1468     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1469 
1470     while(sc->amr_mailbox->mb_nstatus == 0xFF)
1471 	DELAY(1);
1472     while(sc->amr_mailbox->mb_status == 0xFF)
1473 	DELAY(1);
1474     ac->ac_status=sc->amr_mailbox->mb_status;
1475     error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1476     while(sc->amr_mailbox->mb_poll != 0x77)
1477 	DELAY(1);
1478     sc->amr_mailbox->mb_poll = 0;
1479     sc->amr_mailbox->mb_ack = 0x77;
1480 
1481     /* acknowledge that we have the commands */
1482     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1483     while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1484 	DELAY(1);
1485     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1486 
1487     /* unmap the command's data buffer */
1488     if (ac->ac_flags & AMR_CMD_DATAIN) {
1489 	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1490     }
1491     if (ac->ac_flags & AMR_CMD_DATAOUT) {
1492 	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1493     }
1494     bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1495 
1496     return(error);
1497 }
1498 
1499 static __inline int
1500 amr_freeslot(struct amr_command *ac)
1501 {
1502     struct amr_softc *sc = ac->ac_sc;
1503     int			slot;
1504 
1505     debug_called(3);
1506 
1507     slot = ac->ac_slot;
1508     if (sc->amr_busycmd[slot] == NULL)
1509 	panic("amr: slot %d not busy?", slot);
1510 
1511     sc->amr_busycmd[slot] = NULL;
1512     atomic_subtract_int(&sc->amr_busyslots, 1);
1513 
1514     return (0);
1515 }
1516 
1517 /********************************************************************************
1518  * Map/unmap (ac)'s data in the controller's addressable space as required.
1519  *
1520  * These functions may be safely called multiple times on a given command.
1521  */
1522 static void
1523 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1524 {
1525     struct amr_command	*ac = (struct amr_command *)arg;
1526     struct amr_sgentry	*sg;
1527     struct amr_sg64entry *sg64;
1528     int flags, i;
1529 
1530     debug_called(3);
1531 
1532     /* get base address of s/g table */
1533     sg = ac->ac_sg.sg32;
1534     sg64 = ac->ac_sg.sg64;
1535 
1536     if (AC_IS_SG64(ac)) {
1537 	ac->ac_nsegments = nsegments;
1538 	ac->ac_mb_physaddr = 0xffffffff;
1539 	for (i = 0; i < nsegments; i++, sg64++) {
1540 	    sg64->sg_addr = segs[i].ds_addr;
1541 	    sg64->sg_count = segs[i].ds_len;
1542 	}
1543     } else {
1544 	/* decide whether we need to populate the s/g table */
1545 	if (nsegments < 2) {
1546 	    ac->ac_nsegments = 0;
1547 	    ac->ac_mb_physaddr = segs[0].ds_addr;
1548 	} else {
1549             ac->ac_nsegments = nsegments;
1550 	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1551 	    for (i = 0; i < nsegments; i++, sg++) {
1552 		sg->sg_addr = segs[i].ds_addr;
1553 		sg->sg_count = segs[i].ds_len;
1554 	    }
1555 	}
1556     }
1557 
1558     flags = 0;
1559     if (ac->ac_flags & AMR_CMD_DATAIN)
1560 	flags |= BUS_DMASYNC_PREREAD;
1561     if (ac->ac_flags & AMR_CMD_DATAOUT)
1562 	flags |= BUS_DMASYNC_PREWRITE;
1563     bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1564     ac->ac_flags |= AMR_CMD_MAPPED;
1565 }
1566 
1567 static void
1568 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1569 {
1570     struct amr_command *ac = arg;
1571     struct amr_softc *sc = ac->ac_sc;
1572     int mb_channel;
1573 
1574     if (err) {
1575 	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1576 	amr_abort_load(ac);
1577 	return;
1578     }
1579 
1580     amr_setup_sg(arg, segs, nsegs, err);
1581 
1582     /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1583     mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1584     if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1585         ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1586         (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1587 	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1588 
1589     ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1590     ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1591     if (AC_IS_SG64(ac)) {
1592 	ac->ac_sg64_hi = 0;
1593 	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1594     }
1595 
1596     if (sc->amr_submit_command(ac) == EBUSY) {
1597 	amr_freeslot(ac);
1598 	amr_requeue_ready(ac);
1599     }
1600 }
1601 
1602 static void
1603 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1604 {
1605     struct amr_command *ac = arg;
1606     struct amr_softc *sc = ac->ac_sc;
1607     struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1608     struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1609 
1610     if (err) {
1611 	device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1612 	amr_abort_load(ac);
1613 	return;
1614     }
1615 
1616     /* Set up the mailbox portion of the command to point at the ccb */
1617     ac->ac_mailbox.mb_nsgelem = 0;
1618     ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1619 
1620     amr_setup_sg(arg, segs, nsegs, err);
1621 
1622     switch (ac->ac_mailbox.mb_command) {
1623     case AMR_CMD_EXTPASS:
1624 	aep->ap_no_sg_elements = ac->ac_nsegments;
1625 	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1626         break;
1627     case AMR_CMD_PASS:
1628 	ap->ap_no_sg_elements = ac->ac_nsegments;
1629 	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1630 	break;
1631     default:
1632 	panic("Unknown ccb command");
1633     }
1634 
1635     if (sc->amr_submit_command(ac) == EBUSY) {
1636 	amr_freeslot(ac);
1637 	amr_requeue_ready(ac);
1638     }
1639 }
1640 
1641 static int
1642 amr_mapcmd(struct amr_command *ac)
1643 {
1644     bus_dmamap_callback_t *cb;
1645     struct amr_softc	*sc = ac->ac_sc;
1646 
1647     debug_called(3);
1648 
1649     if (AC_IS_SG64(ac)) {
1650 	ac->ac_tag = sc->amr_buffer64_dmat;
1651 	ac->ac_datamap = ac->ac_dma64map;
1652     } else {
1653 	ac->ac_tag = sc->amr_buffer_dmat;
1654 	ac->ac_datamap = ac->ac_dmamap;
1655     }
1656 
1657     if (ac->ac_flags & AMR_CMD_CCB)
1658 	cb = amr_setup_ccb;
1659     else
1660 	cb = amr_setup_data;
1661 
1662     /* if the command involves data at all, and hasn't been mapped */
1663     if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1664 	/* map the data buffers into bus space and build the s/g list */
1665 	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1666 	     ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1667 	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1668 	}
1669    } else {
1670 	if (sc->amr_submit_command(ac) == EBUSY) {
1671 	    amr_freeslot(ac);
1672 	    amr_requeue_ready(ac);
1673 	}
1674    }
1675 
1676     return (0);
1677 }
1678 
1679 static void
1680 amr_unmapcmd(struct amr_command *ac)
1681 {
1682     int			flag;
1683 
1684     debug_called(3);
1685 
1686     /* if the command involved data at all and was mapped */
1687     if (ac->ac_flags & AMR_CMD_MAPPED) {
1688 
1689 	if (ac->ac_data != NULL) {
1690 
1691 	    flag = 0;
1692 	    if (ac->ac_flags & AMR_CMD_DATAIN)
1693 		flag |= BUS_DMASYNC_POSTREAD;
1694 	    if (ac->ac_flags & AMR_CMD_DATAOUT)
1695 		flag |= BUS_DMASYNC_POSTWRITE;
1696 
1697 	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1698 	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1699 	}
1700 
1701 	ac->ac_flags &= ~AMR_CMD_MAPPED;
1702     }
1703 }
1704 
1705 static void
1706 amr_abort_load(struct amr_command *ac)
1707 {
1708     ac_qhead_t head;
1709     struct amr_softc *sc = ac->ac_sc;
1710 
1711     KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
1712 
1713     ac->ac_status = AMR_STATUS_ABORTED;
1714     amr_init_qhead(&head);
1715     amr_enqueue_completed(ac, &head);
1716 
1717     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1718     amr_complete(sc, &head);
1719     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1720 }
1721 
1722 /********************************************************************************
1723  * Take a command and give it to the controller, returns 0 if successful, or
1724  * EBUSY if the command should be retried later.
1725  */
1726 static int
1727 amr_start(struct amr_command *ac)
1728 {
1729     struct amr_softc *sc;
1730     int error = 0;
1731     int slot;
1732 
1733     debug_called(3);
1734 
1735     /* mark command as busy so that polling consumer can tell */
1736     sc = ac->ac_sc;
1737     ac->ac_flags |= AMR_CMD_BUSY;
1738 
1739     /* get a command slot (freed in amr_done) */
1740     slot = ac->ac_slot;
1741     if (sc->amr_busycmd[slot] != NULL)
1742 	panic("amr: slot %d busy?", slot);
1743     sc->amr_busycmd[slot] = ac;
1744     atomic_add_int(&sc->amr_busyslots, 1);
1745 
1746     /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1747     if ((error = amr_mapcmd(ac)) == ENOMEM) {
1748 	/*
1749 	 * Memroy resources are short, so free the slot and let this be tried
1750 	 * later.
1751 	 */
1752 	amr_freeslot(ac);
1753     }
1754 
1755     return (error);
1756 }
1757 
1758 /********************************************************************************
1759  * Extract one or more completed commands from the controller (sc)
1760  *
1761  * Returns nonzero if any commands on the work queue were marked as completed.
1762  */
1763 
1764 int
1765 amr_done(struct amr_softc *sc)
1766 {
1767     ac_qhead_t		head;
1768     struct amr_command	*ac;
1769     struct amr_mailbox	mbox;
1770     int			i, idx, result;
1771 
1772     debug_called(3);
1773 
1774     /* See if there's anything for us to do */
1775     result = 0;
1776     amr_init_qhead(&head);
1777 
1778     /* loop collecting completed commands */
1779     for (;;) {
1780 	/* poll for a completed command's identifier and status */
1781 	if (sc->amr_get_work(sc, &mbox)) {
1782 	    result = 1;
1783 
1784 	    /* iterate over completed commands in this result */
1785 	    for (i = 0; i < mbox.mb_nstatus; i++) {
1786 		/* get pointer to busy command */
1787 		idx = mbox.mb_completed[i] - 1;
1788 		ac = sc->amr_busycmd[idx];
1789 
1790 		/* really a busy command? */
1791 		if (ac != NULL) {
1792 
1793 		    /* pull the command from the busy index */
1794 		    amr_freeslot(ac);
1795 
1796 		    /* save status for later use */
1797 		    ac->ac_status = mbox.mb_status;
1798 		    amr_enqueue_completed(ac, &head);
1799 		    debug(3, "completed command with status %x", mbox.mb_status);
1800 		} else {
1801 		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1802 		}
1803 	    }
1804 	} else
1805 	    break;	/* no work */
1806     }
1807 
1808     /* handle completion and timeouts */
1809     amr_complete(sc, &head);
1810 
1811     return(result);
1812 }
1813 
1814 /********************************************************************************
1815  * Do completion processing on done commands on (sc)
1816  */
1817 
1818 static void
1819 amr_complete(void *context, ac_qhead_t *head)
1820 {
1821     struct amr_softc	*sc = (struct amr_softc *)context;
1822     struct amr_command	*ac;
1823 
1824     debug_called(3);
1825 
1826     /* pull completed commands off the queue */
1827     for (;;) {
1828 	ac = amr_dequeue_completed(sc, head);
1829 	if (ac == NULL)
1830 	    break;
1831 
1832 	/* unmap the command's data buffer */
1833 	amr_unmapcmd(ac);
1834 
1835 	/*
1836 	 * Is there a completion handler?
1837 	 */
1838 	if (ac->ac_complete != NULL) {
1839 	    /* unbusy the command */
1840 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1841 	    ac->ac_complete(ac);
1842 
1843 	    /*
1844 	     * Is someone sleeping on this one?
1845 	     */
1846 	} else {
1847 	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1848 	    ac->ac_flags &= ~AMR_CMD_BUSY;
1849 	    if (ac->ac_flags & AMR_CMD_SLEEP) {
1850 		/* unbusy the command */
1851 		wakeup(ac);
1852 	    }
1853 	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
1854 	}
1855 
1856 	if(!sc->amr_busyslots) {
1857 	    wakeup(sc);
1858 	}
1859     }
1860 
1861     lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1862     sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1863     amr_startio(sc);
1864     lockmgr(&sc->amr_list_lock, LK_RELEASE);
1865 }
1866 
1867 /********************************************************************************
1868  ********************************************************************************
1869                                                         Command Buffer Management
1870  ********************************************************************************
1871  ********************************************************************************/
1872 
1873 /********************************************************************************
1874  * Get a new command buffer.
1875  *
1876  * This may return NULL in low-memory cases.
1877  *
1878  * If possible, we recycle a command buffer that's been used before.
1879  */
1880 struct amr_command *
1881 amr_alloccmd(struct amr_softc *sc)
1882 {
1883     struct amr_command	*ac;
1884 
1885     debug_called(3);
1886 
1887     ac = amr_dequeue_free(sc);
1888     if (ac == NULL) {
1889 	sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1890 	return(NULL);
1891     }
1892 
1893     /* clear out significant fields */
1894     ac->ac_status = 0;
1895     bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1896     ac->ac_flags = 0;
1897     ac->ac_bio = NULL;
1898     ac->ac_data = NULL;
1899     ac->ac_complete = NULL;
1900     ac->ac_retries = 0;
1901     ac->ac_tag = NULL;
1902     ac->ac_datamap = NULL;
1903     return(ac);
1904 }
1905 
1906 /********************************************************************************
1907  * Release a command buffer for recycling.
1908  */
1909 void
1910 amr_releasecmd(struct amr_command *ac)
1911 {
1912     debug_called(3);
1913 
1914     amr_enqueue_free(ac);
1915 }
1916 
1917 /********************************************************************************
1918  * Allocate a new command cluster and initialise it.
1919  */
1920 static void
1921 amr_alloccmd_cluster(struct amr_softc *sc)
1922 {
1923     struct amr_command_cluster	*acc;
1924     struct amr_command		*ac;
1925     int				i, nextslot;
1926 
1927     /*
1928      * If we haven't found the real limit yet, let us have a couple of
1929      * commands in order to be able to probe.
1930      */
1931     if (sc->amr_maxio == 0)
1932 	sc->amr_maxio = 2;
1933 
1934     if (sc->amr_nextslot > sc->amr_maxio)
1935 	return;
1936     acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1937     if (acc != NULL) {
1938 	nextslot = sc->amr_nextslot;
1939 	lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1940 	TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1941 	lockmgr(&sc->amr_list_lock, LK_RELEASE);
1942 	for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1943 	    ac = &acc->acc_command[i];
1944 	    ac->ac_sc = sc;
1945 	    ac->ac_slot = nextslot;
1946 
1947 	    /*
1948 	     * The SG table for each slot is a fixed size and is assumed to
1949 	     * to hold 64-bit s/g objects when the driver is configured to do
1950 	     * 64-bit DMA.  32-bit DMA commands still use the same table, but
1951 	     * cast down to 32-bit objects.
1952 	     */
1953 	    if (AMR_IS_SG64(sc)) {
1954 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1955 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1956 	        ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1957 	    } else {
1958 		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1959 		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1960 	        ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1961 	    }
1962 
1963 	    ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1964 	    ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
1965 		(ac->ac_slot * sizeof(union amr_ccb));
1966 
1967 	    if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
1968 		break;
1969 	    if (AMR_IS_SG64(sc) &&
1970 		(bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
1971 		break;
1972 	    amr_releasecmd(ac);
1973 	    if (++nextslot > sc->amr_maxio)
1974 		break;
1975 	}
1976 	sc->amr_nextslot = nextslot;
1977     }
1978 }
1979 
1980 /********************************************************************************
1981  * Free a command cluster
1982  */
1983 static void
1984 amr_freecmd_cluster(struct amr_command_cluster *acc)
1985 {
1986     struct amr_softc	*sc = acc->acc_command[0].ac_sc;
1987     int			i;
1988 
1989     for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1990 	if (acc->acc_command[i].ac_sc == NULL)
1991 	    break;
1992 	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
1993 	if (AMR_IS_SG64(sc))
1994 		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
1995     }
1996     kfree(acc, M_AMR);
1997 }
1998 
1999 /********************************************************************************
2000  ********************************************************************************
2001                                                          Interface-specific Shims
2002  ********************************************************************************
2003  ********************************************************************************/
2004 
2005 /********************************************************************************
2006  * Tell the controller that the mailbox contains a valid command
2007  */
2008 static int
2009 amr_quartz_submit_command(struct amr_command *ac)
2010 {
2011     struct amr_softc	*sc = ac->ac_sc;
2012     static struct timeval lastfail;
2013     static int		curfail;
2014     int			i = 0;
2015 
2016     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2017     while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2018         DELAY(1);
2019 	/* This is a no-op read that flushes pending mailbox updates */
2020 	AMR_QGET_ODB(sc);
2021     }
2022     if (sc->amr_mailbox->mb_busy) {
2023 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2024 	if (ac->ac_retries++ > 1000) {
2025 	    if (ppsratecheck(&lastfail, &curfail, 1))
2026 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2027 			      "Controller is likely dead\n", ac);
2028 	    ac->ac_retries = 0;
2029 	}
2030 	return (EBUSY);
2031     }
2032 
2033     /*
2034      * Save the slot number so that we can locate this command when complete.
2035      * Note that ident = 0 seems to be special, so we don't use it.
2036      */
2037     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be coppied into mbox */
2038     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2039     sc->amr_mailbox->mb_busy = 1;
2040     sc->amr_mailbox->mb_poll = 0;
2041     sc->amr_mailbox->mb_ack  = 0;
2042     sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2043     sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2044 
2045     AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2046     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2047     return(0);
2048 }
2049 
2050 static int
2051 amr_std_submit_command(struct amr_command *ac)
2052 {
2053     struct amr_softc	*sc = ac->ac_sc;
2054     static struct timeval lastfail;
2055     static int		curfail;
2056 
2057     lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2058     if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2059 	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2060 	if (ac->ac_retries++ > 1000) {
2061 	    if (ppsratecheck(&lastfail, &curfail, 1))
2062 		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2063 			      "Controller is likely dead\n", ac);
2064 	    ac->ac_retries = 0;
2065 	}
2066 	return (EBUSY);
2067     }
2068 
2069     /*
2070      * Save the slot number so that we can locate this command when complete.
2071      * Note that ident = 0 seems to be special, so we don't use it.
2072      */
2073     ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be coppied into mbox */
2074     bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2075     sc->amr_mailbox->mb_busy = 1;
2076     sc->amr_mailbox->mb_poll = 0;
2077     sc->amr_mailbox->mb_ack  = 0;
2078 
2079     AMR_SPOST_COMMAND(sc);
2080     lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2081     return(0);
2082 }
2083 
2084 /********************************************************************************
2085  * Claim any work that the controller has completed; acknowledge completion,
2086  * save details of the completion in (mbsave)
2087  */
2088 static int
2089 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2090 {
2091     int		worked, i;
2092     u_int32_t	outd;
2093     u_int8_t	nstatus;
2094     u_int8_t	completed[46];
2095 
2096     debug_called(3);
2097 
2098     worked = 0;
2099 
2100     /* work waiting for us? */
2101     if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2102 
2103 	/* acknowledge interrupt */
2104 	AMR_QPUT_ODB(sc, AMR_QODB_READY);
2105 
2106 	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2107 	    DELAY(1);
2108 	sc->amr_mailbox->mb_nstatus = 0xff;
2109 
2110 	/* wait until fw wrote out all completions */
2111 	for (i = 0; i < nstatus; i++) {
2112 	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2113 		DELAY(1);
2114 	    sc->amr_mailbox->mb_completed[i] = 0xff;
2115 	}
2116 
2117 	/* Save information for later processing */
2118 	mbsave->mb_nstatus = nstatus;
2119 	mbsave->mb_status = sc->amr_mailbox->mb_status;
2120 	sc->amr_mailbox->mb_status = 0xff;
2121 
2122 	for (i = 0; i < nstatus; i++)
2123 	    mbsave->mb_completed[i] = completed[i];
2124 
2125 	/* acknowledge that we have the commands */
2126 	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2127 
2128 #if 0
2129 #ifndef AMR_QUARTZ_GOFASTER
2130 	/*
2131 	 * This waits for the controller to notice that we've taken the
2132 	 * command from it.  It's very inefficient, and we shouldn't do it,
2133 	 * but if we remove this code, we stop completing commands under
2134 	 * load.
2135 	 *
2136 	 * Peter J says we shouldn't do this.  The documentation says we
2137 	 * should.  Who is right?
2138 	 */
2139 	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2140 	    ;				/* XXX aiee! what if it dies? */
2141 #endif
2142 #endif
2143 
2144 	worked = 1;			/* got some work */
2145     }
2146 
2147     return(worked);
2148 }
2149 
2150 static int
2151 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2152 {
2153     int		worked;
2154     u_int8_t	istat;
2155 
2156     debug_called(3);
2157 
2158     worked = 0;
2159 
2160     /* check for valid interrupt status */
2161     istat = AMR_SGET_ISTAT(sc);
2162     if ((istat & AMR_SINTR_VALID) != 0) {
2163 	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */
2164 
2165 	/* save mailbox, which contains a list of completed commands */
2166 	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2167 
2168 	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
2169 	worked = 1;
2170     }
2171 
2172     return(worked);
2173 }
2174 
2175 /********************************************************************************
2176  * Notify the controller of the mailbox location.
2177  */
2178 static void
2179 amr_std_attach_mailbox(struct amr_softc *sc)
2180 {
2181 
2182     /* program the mailbox physical address */
2183     AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
2184     AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
2185     AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2186     AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2187     AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2188 
2189     /* clear any outstanding interrupt and enable interrupts proper */
2190     AMR_SACK_INTERRUPT(sc);
2191     AMR_SENABLE_INTR(sc);
2192 }
2193 
2194 #ifdef AMR_BOARD_INIT
2195 /********************************************************************************
2196  * Initialise the controller
2197  */
2198 static int
2199 amr_quartz_init(struct amr_softc *sc)
2200 {
2201     int		status, ostatus;
2202 
2203     device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2204 
2205     AMR_QRESET(sc);
2206 
2207     ostatus = 0xff;
2208     while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2209 	if (status != ostatus) {
2210 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2211 	    ostatus = status;
2212 	}
2213 	switch (status) {
2214 	case AMR_QINIT_NOMEM:
2215 	    return(ENOMEM);
2216 
2217 	case AMR_QINIT_SCAN:
2218 	    /* XXX we could print channel/target here */
2219 	    break;
2220 	}
2221     }
2222     return(0);
2223 }
2224 
2225 static int
2226 amr_std_init(struct amr_softc *sc)
2227 {
2228     int		status, ostatus;
2229 
2230     device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2231 
2232     AMR_SRESET(sc);
2233 
2234     ostatus = 0xff;
2235     while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2236 	if (status != ostatus) {
2237 	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2238 	    ostatus = status;
2239 	}
2240 	switch (status) {
2241 	case AMR_SINIT_NOMEM:
2242 	    return(ENOMEM);
2243 
2244 	case AMR_SINIT_INPROG:
2245 	    /* XXX we could print channel/target here? */
2246 	    break;
2247 	}
2248     }
2249     return(0);
2250 }
2251 #endif
2252 
2253 /********************************************************************************
2254  ********************************************************************************
2255                                                                         Debugging
2256  ********************************************************************************
2257  ********************************************************************************/
2258 
2259 /********************************************************************************
2260  * Identify the controller and print some information about it.
2261  */
2262 static void
2263 amr_describe_controller(struct amr_softc *sc)
2264 {
2265     struct amr_prodinfo	*ap;
2266     struct amr_enquiry	*ae;
2267     char		*prod;
2268     int			status;
2269 
2270     /*
2271      * Try to get 40LD product info, which tells us what the card is labelled as.
2272      */
2273     if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2274 	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2275 		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
2276 		      ap->ap_memsize);
2277 
2278 	kfree(ap, M_AMR);
2279 	return;
2280     }
2281 
2282     /*
2283      * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2284      */
2285     if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2286 	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2287 
2288     } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2289 
2290 	/*
2291 	 * Try to work it out based on the PCI signatures.
2292 	 */
2293 	switch (pci_get_device(sc->amr_dev)) {
2294 	case 0x9010:
2295 	    prod = "Series 428";
2296 	    break;
2297 	case 0x9060:
2298 	    prod = "Series 434";
2299 	    break;
2300 	default:
2301 	    prod = "unknown controller";
2302 	    break;
2303 	}
2304     } else {
2305 	device_printf(sc->amr_dev, "<unsupported controller>\n");
2306 	return;
2307     }
2308 
2309     /*
2310      * HP NetRaid controllers have a special encoding of the firmware and
2311      * BIOS versions. The AMI version seems to have it as strings whereas
2312      * the HP version does it with a leading uppercase character and two
2313      * binary numbers.
2314      */
2315 
2316     if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2317        ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2318        ae->ae_adapter.aa_firmware[1] <  ' ' &&
2319        ae->ae_adapter.aa_firmware[0] <  ' ' &&
2320        ae->ae_adapter.aa_bios[2] >= 'A'     &&
2321        ae->ae_adapter.aa_bios[2] <= 'Z'     &&
2322        ae->ae_adapter.aa_bios[1] <  ' '     &&
2323        ae->ae_adapter.aa_bios[0] <  ' ') {
2324 
2325 	/* this looks like we have an HP NetRaid version of the MegaRaid */
2326 
2327     	if(ae->ae_signature == AMR_SIG_438) {
2328     		/* the AMI 438 is a NetRaid 3si in HP-land */
2329     		prod = "HP NetRaid 3si";
2330     	}
2331 
2332 	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2333 		      prod, ae->ae_adapter.aa_firmware[2],
2334 		      ae->ae_adapter.aa_firmware[1],
2335 		      ae->ae_adapter.aa_firmware[0],
2336 		      ae->ae_adapter.aa_bios[2],
2337 		      ae->ae_adapter.aa_bios[1],
2338 		      ae->ae_adapter.aa_bios[0],
2339 		      ae->ae_adapter.aa_memorysize);
2340     } else {
2341 	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2342 		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2343 		      ae->ae_adapter.aa_memorysize);
2344     }
2345     kfree(ae, M_AMR);
2346 }
2347 
2348 int
2349 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2350 {
2351     struct amr_command	*ac;
2352     int			error = EIO;
2353 
2354     debug_called(1);
2355 
2356     sc->amr_state |= AMR_STATE_INTEN;
2357 
2358     /* get ourselves a command buffer */
2359     if ((ac = amr_alloccmd(sc)) == NULL)
2360 	goto out;
2361     /* set command flags */
2362     ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2363 
2364     /* point the command at our data */
2365     ac->ac_data = data;
2366     ac->ac_length = blks * AMR_BLKSIZE;
2367 
2368     /* build the command proper */
2369     ac->ac_mailbox.mb_command 	= AMR_CMD_LWRITE;
2370     ac->ac_mailbox.mb_blkcount	= blks;
2371     ac->ac_mailbox.mb_lba	= lba;
2372     ac->ac_mailbox.mb_drive	= unit;
2373 
2374     /* can't assume that interrupts are going to work here, so play it safe */
2375     if (sc->amr_poll_command(ac))
2376 	goto out;
2377     error = ac->ac_status;
2378 
2379  out:
2380     if (ac != NULL)
2381 	amr_releasecmd(ac);
2382 
2383     sc->amr_state &= ~AMR_STATE_INTEN;
2384     return (error);
2385 }
2386 
2387 
2388 
2389 #ifdef AMR_DEBUG
2390 /********************************************************************************
2391  * Print the command (ac) in human-readable format
2392  */
2393 #if 0
2394 static void
2395 amr_printcommand(struct amr_command *ac)
2396 {
2397     struct amr_softc	*sc = ac->ac_sc;
2398     struct amr_sgentry	*sg;
2399     int			i;
2400 
2401     device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
2402 		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2403     device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
2404 		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2405     device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2406     device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
2407 		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2408     device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2409 
2410     /* get base address of s/g table */
2411     sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2412     for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2413 	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
2414 }
2415 #endif
2416 #endif
2417