1 /*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28 /*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
43 * herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 *
57 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.99 2012/08/31 09:42:46 scottl Exp $
58 */
59
60 /*
61 * Driver for the AMI MegaRaid family of controllers.
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/proc.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysmsg.h>
71
72 #include <sys/bio.h>
73 #include <sys/bus.h>
74 #include <sys/conf.h>
75 #include <sys/stat.h>
76
77 #include <machine/cpu.h>
78 #include <sys/rman.h>
79
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
82
83 #include <dev/raid/amr/amrio.h>
84 #include <dev/raid/amr/amrreg.h>
85 #include <dev/raid/amr/amrvar.h>
86 #define AMR_DEFINE_TABLES
87 #include <dev/raid/amr/amr_tables.h>
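/*
 * Defining AMR_DEFINE_TABLES before including amr_tables.h is intended to
 * make this the one compilation unit that instantiates the shared table
 * data; other files including the header see only declarations.
 */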
88
89 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
90
91 static d_open_t amr_open;
92 static d_close_t amr_close;
93 static d_ioctl_t amr_ioctl;
94
95 static struct dev_ops amr_ops = {
96 { "amr", 0, 0 },
97 .d_open = amr_open,
98 .d_close = amr_close,
99 .d_ioctl = amr_ioctl,
100 };
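/*
 * These entry points serve the control node created in amr_attach()
 * (/dev/amr%d, plus a "megadev0" alias on unit 0), which userland
 * management tools such as LSI's MEGAMGR drive through amr_ioctl().
 */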
101
102 int linux_no_adapter = 0;
103 /*
104 * Initialisation, bus interface.
105 */
106 static void amr_startup(void *arg);
107
108 /*
109 * Command wrappers
110 */
111 static int amr_query_controller(struct amr_softc *sc);
112 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
113 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
114 static void amr_completeio(struct amr_command *ac);
115 static int amr_support_ext_cdb(struct amr_softc *sc);
116
117 /*
118 * Command buffer allocation.
119 */
120 static void amr_alloccmd_cluster(struct amr_softc *sc);
121 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
122
123 /*
124 * Command processing.
125 */
126 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
127 static int amr_wait_command(struct amr_command *ac);
128 static int amr_mapcmd(struct amr_command *ac);
129 static void amr_unmapcmd(struct amr_command *ac);
130 static int amr_start(struct amr_command *ac);
131 static void amr_complete(void *context, ac_qhead_t *head);
132 static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
133 static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
134 static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
135 static void amr_abort_load(struct amr_command *ac);
136
137 /*
138 * Interface-specific shims
139 */
140 static int amr_quartz_submit_command(struct amr_command *ac);
141 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
142 static int amr_quartz_poll_command(struct amr_command *ac);
143 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
144
145 static int amr_std_submit_command(struct amr_command *ac);
146 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
147 static int amr_std_poll_command(struct amr_command *ac);
148 static void amr_std_attach_mailbox(struct amr_softc *sc);
149
150 #ifdef AMR_BOARD_INIT
151 static int amr_quartz_init(struct amr_softc *sc);
152 static int amr_std_init(struct amr_softc *sc);
153 #endif
154
155 /*
156 * Debugging
157 */
158 static void amr_describe_controller(struct amr_softc *sc);
159 #ifdef AMR_DEBUG
160 #if 0
161 static void amr_printcommand(struct amr_command *ac);
162 #endif
163 #endif
164
165 static void amr_init_sysctl(struct amr_softc *sc);
166 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
167 int32_t flag, struct sysmsg *sm);
168
169 static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
170
171 /********************************************************************************
172 ********************************************************************************
173 Inline Glue
174 ********************************************************************************
175 ********************************************************************************/
176
177 /********************************************************************************
178 ********************************************************************************
179 Public Interfaces
180 ********************************************************************************
181 ********************************************************************************/
182
183 /********************************************************************************
184 * Initialise the controller and softc.
185 */
186 int
187 amr_attach(struct amr_softc *sc)
188 {
189 device_t child;
190
191 debug_called(1);
192
193 /*
194 * Initialise per-controller queues.
195 */
196 amr_init_qhead(&sc->amr_freecmds);
197 amr_init_qhead(&sc->amr_ready);
198 TAILQ_INIT(&sc->amr_cmd_clusters);
199 bioq_init(&sc->amr_bioq);
200
201 debug(2, "queue init done");
202
203 /*
204 * Configure for this controller type.
205 */
206 if (AMR_IS_QUARTZ(sc)) {
207 sc->amr_submit_command = amr_quartz_submit_command;
208 sc->amr_get_work = amr_quartz_get_work;
209 sc->amr_poll_command = amr_quartz_poll_command;
210 sc->amr_poll_command1 = amr_quartz_poll_command1;
211 } else {
212 sc->amr_submit_command = amr_std_submit_command;
213 sc->amr_get_work = amr_std_get_work;
214 sc->amr_poll_command = amr_std_poll_command;
215 amr_std_attach_mailbox(sc);
216 }
217
218 #ifdef AMR_BOARD_INIT
219 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
220 return(ENXIO);
221 #endif
222
223 /*
224 * Allocate initial commands.
225 */
226 amr_alloccmd_cluster(sc);
227
228 /*
229 * Quiz controller for features and limits.
230 */
231 if (amr_query_controller(sc))
232 return(ENXIO);
233
234 debug(2, "controller query complete");
235
236 /*
237 * preallocate the remaining commands.
238 */
239 while (sc->amr_nextslot < sc->amr_maxio)
240 amr_alloccmd_cluster(sc);
241
242 /*
243 * Setup sysctls.
244 */
245 amr_init_sysctl(sc);
246
247 /*
248 * Attach our 'real' SCSI channels to CAM.
249 */
250 child = device_add_child(sc->amr_dev, "amrp", -1);
251 sc->amr_pass = child;
252 if (child != NULL) {
253 device_set_softc(child, sc);
254 device_set_desc(child, "SCSI Passthrough Bus");
255 bus_generic_attach(sc->amr_dev);
256 }
257
258 /*
259 * Create the control device.
260 */
261 sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
262 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
263 sc->amr_dev_t->si_drv1 = sc;
264 linux_no_adapter++;
265 if (device_get_unit(sc->amr_dev) == 0)
266 make_dev_alias(sc->amr_dev_t, "megadev0");
267
268 /*
269 * Schedule ourselves to bring the controller up once interrupts are
270 * available.
271 */
272 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
273 sc->amr_ich.ich_func = amr_startup;
274 sc->amr_ich.ich_arg = sc;
275 sc->amr_ich.ich_desc = "amr";
276 if (config_intrhook_establish(&sc->amr_ich) != 0) {
277 device_printf(sc->amr_dev, "can't establish configuration hook\n");
278 return(ENOMEM);
279 }
280
281 /*
282 * Print a little information about the controller.
283 */
284 amr_describe_controller(sc);
285
286 debug(2, "attach complete");
287 return(0);
288 }
289
290 /********************************************************************************
291 * Locate disk resources and attach children to them.
292 */
293 static void
294 amr_startup(void *arg)
295 {
296 struct amr_softc *sc = (struct amr_softc *)arg;
297 struct amr_logdrive *dr;
298 int i, error;
299
300 debug_called(1);
301
302 /* pull ourselves off the intrhook chain */
303 if (sc->amr_ich.ich_func)
304 config_intrhook_disestablish(&sc->amr_ich);
305 sc->amr_ich.ich_func = NULL;
306
307 /* get up-to-date drive information */
308 if (amr_query_controller(sc)) {
309 device_printf(sc->amr_dev, "can't scan controller for drives\n");
310 return;
311 }
312
313 /* iterate over available drives */
314 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
315 /* are we already attached to this drive? */
316 if (dr->al_disk == 0) {
317 /* generate geometry information */
318 if (dr->al_size > 0x200000) { /* extended translation? */
319 dr->al_heads = 255;
320 dr->al_sectors = 63;
321 } else {
322 dr->al_heads = 64;
323 dr->al_sectors = 32;
324 }
325 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
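/*
 * Example: a 4194304-sector (2GB) unit exceeds the 0x200000 threshold, so
 * it gets extended translation: 255 heads x 63 sectors, and
 * 4194304 / (255 * 63) = 261 cylinders.
 */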
326
327 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
328 if (dr->al_disk == 0)
329 device_printf(sc->amr_dev, "device_add_child failed\n");
330 device_set_ivars(dr->al_disk, dr);
331 }
332 }
333
334 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
335 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
336
337 /* mark controller back up */
338 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
339
340 /* interrupts will be enabled before we do anything more */
341 sc->amr_state |= AMR_STATE_INTEN;
342
343 return;
344 }
345
346 static void
347 amr_init_sysctl(struct amr_softc *sc)
348 {
349 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->amr_dev);
350 struct sysctl_oid *tree = device_get_sysctl_tree(sc->amr_dev);
351
352 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
353 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
354 "");
355 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
356 OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
357 "");
358 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
359 OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
360 "");
361 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
362 OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
363 "");
364 }
365
366
367 /*******************************************************************************
368 * Free resources associated with a controller instance
369 */
370 void
371 amr_free(struct amr_softc *sc)
372 {
373 struct amr_command_cluster *acc;
374
375 /* detach from CAM */
376 if (sc->amr_pass != NULL)
377 device_delete_child(sc->amr_dev, sc->amr_pass);
378
379 /* throw away any command buffers */
380 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
381 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
382 amr_freecmd_cluster(acc);
383 }
384
385 /* destroy control device */
386 if(sc->amr_dev_t != NULL)
387 destroy_dev(sc->amr_dev_t);
388 dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));
389
390 #if 0 /* XXX swildner */
391 if (mtx_initialized(&sc->amr_hw_lock))
392 mtx_destroy(&sc->amr_hw_lock);
393
394 if (mtx_initialized(&sc->amr_list_lock))
395 mtx_destroy(&sc->amr_list_lock);
396 #endif
397
398 lockuninit(&sc->amr_hw_lock);
399 lockuninit(&sc->amr_list_lock);
400 }
401
402 /*******************************************************************************
403 * Receive a bio structure from a child device and queue it on a particular
404 * disk resource, then poke the disk resource to start as much work as it can.
405 */
406 int
407 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
408 {
409 debug_called(2);
410
411 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
412 amr_enqueue_bio(sc, bio);
413 amr_startio(sc);
414 lockmgr(&sc->amr_list_lock, LK_RELEASE);
415 return(0);
416 }
417
418 /********************************************************************************
419 * Accept an open operation on the control device.
420 */
421 static int
422 amr_open(struct dev_open_args *ap)
423 {
424 cdev_t dev = ap->a_head.a_dev;
425 int unit = minor(dev);
426 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
427
428 debug_called(1);
429
430 sc->amr_state |= AMR_STATE_OPEN;
431 return(0);
432 }
433
434 /********************************************************************************
435 * Accept the last close on the control device.
436 */
437 static int
438 amr_close(struct dev_close_args *ap)
439 {
440 cdev_t dev = ap->a_head.a_dev;
441 int unit = minor(dev);
442 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
443
444 debug_called(1);
445
446 sc->amr_state &= ~AMR_STATE_OPEN;
447 return (0);
448 }
449
450 /********************************************************************************
451 * Handle controller-specific control operations.
452 */
453 static void
454 amr_rescan_drives(struct cdev *dev)
455 {
456 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
457 int i, error = 0;
458
459 sc->amr_state |= AMR_STATE_REMAP_LD;
460 while (sc->amr_busyslots) {
461 device_printf(sc->amr_dev, "idle controller\n");
462 amr_done(sc);
463 }
464
465 /* mark ourselves as in-shutdown */
466 sc->amr_state |= AMR_STATE_SHUTDOWN;
467
468 /* flush controller */
469 device_printf(sc->amr_dev, "flushing cache...");
470 kprintf("%s\n", amr_flush(sc) ? "failed" : "done");
471
472 /* delete all our child devices */
473 for(i = 0 ; i < AMR_MAXLD; i++) {
474 if(sc->amr_drive[i].al_disk != 0) {
475 if((error = device_delete_child(sc->amr_dev,
476 sc->amr_drive[i].al_disk)) != 0)
477 goto shutdown_out;
478
479 sc->amr_drive[i].al_disk = 0;
480 }
481 }
482
483 shutdown_out:
484 amr_startup(sc);
485 }
486
487 /*
488 * Bug-for-bug compatibility with Linux!
489 * Some apps will send commands with inlen and outlen set to 0,
490 * even though they expect data to be transferred to them from the
491 * card. Linux accidentally allows this by allocating a 4KB
492 * buffer for the transfer anyway, but it then throws it away
493 * without copying it back to the app.
494 *
495 * The amr(4) firmware relies on this feature. In fact, it assumes
496 * the buffer is always a power of 2 up to a max of 64k. There is
497 * also at least one case where it assumes a buffer less than 16k is
498 * greater than 16k. Force a minimum buffer size of 32k and round
499 * sizes between 32k and 64k up to 64k as a workaround.
500 */
501 static unsigned long
502 amr_ioctl_buffer_length(unsigned long len)
503 {
504
505 if (len <= 32 * 1024)
506 return (32 * 1024);
507 if (len <= 64 * 1024)
508 return (64 * 1024);
509 return (len);
510 }
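/*
 * Examples: a 4k request is rounded up to 32k, a 40k request to 64k, and
 * anything larger than 64k (e.g. 128k) is passed through unchanged.
 */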
511
512 static int
513 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
514 struct sysmsg *sm)
515 {
516 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
517 struct amr_command *ac;
518 struct amr_mailbox *mb;
519 struct amr_linux_ioctl ali;
520 void *dp, *temp;
521 int error;
522 int len, ac_flags = 0;
523 int logical_drives_changed = 0;
524 u_int32_t linux_version = 0x02100000;
525 u_int8_t status;
526 struct amr_passthrough *ap; /* 60 bytes */
527
528 error = 0;
529 dp = NULL;
530 ac = NULL;
531 ap = NULL;
532
533 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
534 return (error);
535 switch (ali.ui.fcs.opcode) {
536 case 0x82:
537 switch(ali.ui.fcs.subopcode) {
538 case 'e':
539 copyout(&linux_version, (void *)(uintptr_t)ali.data,
540 sizeof(linux_version));
541 error = 0;
542 break;
543
544 case 'm':
545 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
546 sizeof(linux_no_adapter));
547 sm->sysmsg_result = linux_no_adapter;
548 error = 0;
549 break;
550
551 default:
552 kprintf("Unknown subopcode\n");
553 error = ENOIOCTL;
554 break;
555 }
556 break;
557
558 case 0x80:
559 case 0x81:
560 if (ali.ui.fcs.opcode == 0x80)
561 len = max(ali.outlen, ali.inlen);
562 else
563 len = ali.ui.fcs.length;
564
565 mb = (void *)&ali.mbox[0];
566
567 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
568 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
569 if (sc->amr_allow_vol_config == 0) {
570 error = EPERM;
571 break;
572 }
573 logical_drives_changed = 1;
574 }
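/*
 * Volume create/delete is refused unless the allow_volume_configure
 * sysctl (see amr_init_sysctl()) is set; after such a command completes,
 * amr_rescan_drives() is called below to refresh the drive list.
 */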
575
576 if (ali.mbox[0] == AMR_CMD_PASS) {
577 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
578 while ((ac = amr_alloccmd(sc)) == NULL)
579 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
580 lockmgr(&sc->amr_list_lock, LK_RELEASE);
581 ap = &ac->ac_ccb->ccb_pthru;
582
583 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
584 sizeof(struct amr_passthrough));
585 if (error)
586 break;
587
588 if (ap->ap_data_transfer_length)
589 dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
590 M_WAITOK | M_ZERO);
591
592 if (ali.inlen) {
593 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
594 dp, ap->ap_data_transfer_length);
595 if (error)
596 break;
597 }
598
599 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
600 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
601 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
602 ac->ac_flags = ac_flags;
603
604 ac->ac_data = dp;
605 ac->ac_length = ap->ap_data_transfer_length;
606 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
607
608 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
609 error = amr_wait_command(ac);
610 lockmgr(&sc->amr_list_lock, LK_RELEASE);
611 if (error)
612 break;
613
614 status = ac->ac_status;
615 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
616 if (error)
617 break;
618
619 if (ali.outlen) {
620 error = copyout(dp, temp, ap->ap_data_transfer_length);
621 if (error)
622 break;
623 }
624 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
625 if (error)
626 break;
627
628 error = 0;
629 break;
630 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
631 kprintf("No AMR_CMD_PASS_64\n");
632 error = ENOIOCTL;
633 break;
634 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
635 kprintf("No AMR_CMD_EXTPASS\n");
636 error = ENOIOCTL;
637 break;
638 } else {
639 len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
640
641 dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);
642
643 if (ali.inlen) {
644 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
645 if (error)
646 break;
647 }
648
649 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
650 while ((ac = amr_alloccmd(sc)) == NULL)
651 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
652
653 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
654 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
655 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
656
657 ac->ac_length = len;
658 ac->ac_data = dp;
659 ac->ac_flags = ac_flags;
660
661 error = amr_wait_command(ac);
662 lockmgr(&sc->amr_list_lock, LK_RELEASE);
663 if (error)
664 break;
665
666 status = ac->ac_status;
667 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
668 if (ali.outlen) {
669 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
670 if (error)
671 break;
672 }
673
674 error = 0;
675 if (logical_drives_changed)
676 amr_rescan_drives(dev);
677 break;
678 }
679 break;
680
681 default:
682 debug(1, "unknown linux ioctl 0x%lx", cmd);
683 kprintf("unknown linux ioctl 0x%lx\n", cmd);
684 error = ENOIOCTL;
685 break;
686 }
687
688 /*
689 * At this point, we know that there is a lock held and that these
690 * objects have been allocated.
691 */
692 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
693 if (ac != NULL)
694 amr_releasecmd(ac);
695 lockmgr(&sc->amr_list_lock, LK_RELEASE);
696 if (dp != NULL)
697 kfree(dp, M_AMR);
698 return(error);
699 }
700
701 static int
702 amr_ioctl(struct dev_ioctl_args *ap)
703 {
704 cdev_t dev = ap->a_head.a_dev;
705 caddr_t addr = ap->a_data;
706 u_long cmd = ap->a_cmd;
707 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
708 union {
709 void *_p;
710 struct amr_user_ioctl *au;
711 #ifdef AMR_IO_COMMAND32
712 struct amr_user_ioctl32 *au32;
713 #endif
714 int *result;
715 } arg;
716 struct amr_command *ac;
717 struct amr_mailbox_ioctl *mbi;
718 void *dp, *au_buffer;
719 unsigned long au_length, real_length;
720 unsigned char *au_cmd;
721 int *au_statusp;
722 int error;
723 struct amr_passthrough *_ap; /* 60 bytes */
724 int logical_drives_changed = 0;
725
726 debug_called(1);
727
728 arg._p = (void *)addr;
729
730 error = 0;
731 dp = NULL;
732 ac = NULL;
733 _ap = NULL;
734
735 switch(cmd) {
736
737 case AMR_IO_VERSION:
738 debug(1, "AMR_IO_VERSION");
739 *arg.result = AMR_IO_VERSION_NUMBER;
740 return(0);
741
742 #ifdef AMR_IO_COMMAND32
743 /*
744 * Accept ioctls from 32-bit binaries on non-32-bit
745 * platforms, such as AMD. LSI's MEGAMGR utility is
746 * the only example known today... -mi
747 */
748 case AMR_IO_COMMAND32:
749 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
750 au_cmd = arg.au32->au_cmd;
751 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
752 au_length = arg.au32->au_length;
753 au_statusp = &arg.au32->au_status;
754 break;
755 #endif
756
757 case AMR_IO_COMMAND:
758 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
759 au_cmd = arg.au->au_cmd;
760 au_buffer = (void *)arg.au->au_buffer;
761 au_length = arg.au->au_length;
762 au_statusp = &arg.au->au_status;
763 break;
764
765 case 0xc0046d00:
766 case 0xc06e6d00: /* Linux emulation */
767 {
768 devclass_t devclass;
769 struct amr_linux_ioctl ali;
770 int adapter, error;
771
772 devclass = devclass_find("amr");
773 if (devclass == NULL)
774 return (ENOENT);
775
776 error = copyin(addr, &ali, sizeof(ali));
777 if (error)
778 return (error);
779 if (ali.ui.fcs.opcode == 0x82)
780 adapter = 0;
781 else
782 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
783
784 sc = devclass_get_softc(devclass, adapter);
785 if (sc == NULL)
786 return (ENOENT);
787
788 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
789 }
790 default:
791 debug(1, "unknown ioctl 0x%lx", cmd);
792 return(ENOIOCTL);
793 }
794
795 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
796 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
797 if (sc->amr_allow_vol_config == 0) {
798 error = EPERM;
799 goto out;
800 }
801 logical_drives_changed = 1;
802 }
803
804 /* handle inbound data buffer */
805 real_length = amr_ioctl_buffer_length(au_length);
806 if (au_length != 0 && au_cmd[0] != 0x06) {
807 if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
808 error = ENOMEM;
809 goto out;
810 }
811 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
812 kfree(dp, M_AMR);
813 return (error);
814 }
815 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
816 }
817
818 /* Allocate this now before the mutex gets held */
819
820 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
821 while ((ac = amr_alloccmd(sc)) == NULL)
822 lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
823
824 /* handle SCSI passthrough command */
825 if (au_cmd[0] == AMR_CMD_PASS) {
826 int len;
827
828 _ap = &ac->ac_ccb->ccb_pthru;
829 bzero(_ap, sizeof(struct amr_passthrough));
830
831 /* copy cdb */
832 len = au_cmd[2];
833 _ap->ap_cdb_length = len;
834 bcopy(au_cmd + 3, _ap->ap_cdb, len);
835
836 /* build passthrough */
837 _ap->ap_timeout = au_cmd[len + 3] & 0x07;
838 _ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
839 _ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
840 _ap->ap_logical_drive_no = au_cmd[len + 4];
841 _ap->ap_channel = au_cmd[len + 5];
842 _ap->ap_scsi_id = au_cmd[len + 6];
843 _ap->ap_request_sense_length = 14;
844 _ap->ap_data_transfer_length = au_length;
845 /* XXX what about the request-sense area? does the caller want it? */
846
847 /* build command */
848 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
849 ac->ac_flags = AMR_CMD_CCB;
850
851 } else {
852 /* direct command to controller */
853 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
854
855 /* copy pertinent mailbox items */
856 mbi->mb_command = au_cmd[0];
857 mbi->mb_channel = au_cmd[1];
858 mbi->mb_param = au_cmd[2];
859 mbi->mb_pad[0] = au_cmd[3];
860 mbi->mb_drive = au_cmd[4];
861 ac->ac_flags = 0;
862 }
863
864 /* build the command */
865 ac->ac_data = dp;
866 ac->ac_length = real_length;
867 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
868
869 /* run the command */
870 error = amr_wait_command(ac);
871 lockmgr(&sc->amr_list_lock, LK_RELEASE);
872 if (error)
873 goto out;
874
875 /* copy out data and set status */
876 if (au_length != 0) {
877 error = copyout(dp, au_buffer, au_length);
878 }
879 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
880 if (dp != NULL)
881 debug(2, "%p status 0x%x", dp, ac->ac_status);
882 *au_statusp = ac->ac_status;
883
884 out:
885 /*
886 * At this point, we know that there is a lock held and that these
887 * objects have been allocated.
888 */
889 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
890 if (ac != NULL)
891 amr_releasecmd(ac);
892 lockmgr(&sc->amr_list_lock, LK_RELEASE);
893 if (dp != NULL)
894 kfree(dp, M_AMR);
895
896 if (logical_drives_changed)
897 amr_rescan_drives(dev);
898
899 return(error);
900 }
901
902 /********************************************************************************
903 ********************************************************************************
904 Command Wrappers
905 ********************************************************************************
906 ********************************************************************************/
907
908 /********************************************************************************
909 * Interrogate the controller for the operational parameters we require.
910 */
911 static int
912 amr_query_controller(struct amr_softc *sc)
913 {
914 struct amr_enquiry3 *aex;
915 struct amr_prodinfo *ap;
916 struct amr_enquiry *ae;
917 int ldrv;
918 int status;
919
920 /*
921 * Greater than 10 byte cdb support
922 */
923 sc->support_ext_cdb = amr_support_ext_cdb(sc);
924
925 if(sc->support_ext_cdb) {
926 debug(2,"supports extended CDBs.");
927 }
928
929 /*
930 * Try to issue an ENQUIRY3 command
931 */
932 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
933 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
934
935 /*
936 * Fetch current state of logical drives.
937 */
938 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
939 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
940 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
941 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
942 debug(2, " drive %d: %d state %x properties %x", ldrv, sc->amr_drive[ldrv].al_size,
943 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
944 }
945 kfree(aex, M_AMR);
946
947 /*
948 * Get product info for channel count.
949 */
950 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
951 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
952 return(1);
953 }
954 sc->amr_maxdrives = 40;
955 sc->amr_maxchan = ap->ap_nschan;
956 sc->amr_maxio = ap->ap_maxio;
957 sc->amr_type |= AMR_TYPE_40LD;
958 kfree(ap, M_AMR);
959
960 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
961 if (ap != NULL)
962 kfree(ap, M_AMR);
963 if (!status) {
964 sc->amr_ld_del_supported = 1;
965 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
966 }
967 } else {
968
969 /* failed, try the 8LD ENQUIRY commands */
970 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
971 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
972 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
973 return(1);
974 }
975 ae->ae_signature = 0;
976 }
977
978 /*
979 * Fetch current state of logical drives.
980 */
981 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
982 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
983 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
984 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
985 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
986 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
987 }
988
989 sc->amr_maxdrives = 8;
990 sc->amr_maxchan = ae->ae_adapter.aa_channels;
991 sc->amr_maxio = ae->ae_adapter.aa_maxio;
992 kfree(ae, M_AMR);
993 }
994
995 /*
996 * Mark remaining drives as unused.
997 */
998 for (; ldrv < AMR_MAXLD; ldrv++)
999 sc->amr_drive[ldrv].al_size = 0xffffffff;
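/* 0xffffffff marks a logical drive slot as unused; amr_startup() stops its scan there */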
1000
1001 /*
1002 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1003 * the controller's reported value, and lockups have been seen when we do.
1004 */
1005 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1006
1007 return(0);
1008 }
1009
1010 /********************************************************************************
1011 * Run a generic enquiry-style command.
1012 */
1013 static void *
1014 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1015 {
1016 struct amr_command *ac;
1017 void *result;
1018 u_int8_t *mbox;
1019 int error;
1020
1021 debug_called(1);
1022
1023 *status = 0; /* avoid gcc warnings */
1024 error = 1;
1025 result = NULL;
1026
1027 /* get ourselves a command buffer */
1028 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1029 ac = amr_alloccmd(sc);
1030 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1031 if (ac == NULL)
1032 goto out;
1033 /* allocate the response structure */
1034 if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1035 goto out;
1036 /* set command flags */
1037
1038 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1039
1040 /* point the command at our data */
1041 ac->ac_data = result;
1042 ac->ac_length = bufsize;
1043
1044 /* build the command proper */
1045 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1046 mbox[0] = cmd;
1047 mbox[2] = cmdsub;
1048 mbox[3] = cmdqual;
1049 *status = 0;
1050
1051 /* can't assume that interrupts are going to work here, so play it safe */
1052 if (sc->amr_poll_command(ac))
1053 goto out;
1054 error = ac->ac_status;
1055 *status = ac->ac_status;
1056
1057 out:
1058 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1059 if (ac != NULL)
1060 amr_releasecmd(ac);
1061 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1062 if ((error != 0) && (result != NULL)) {
1063 kfree(result, M_AMR);
1064 result = NULL;
1065 }
1066 return(result);
1067 }
1068
1069 /********************************************************************************
1070 * Flush the controller's internal cache, return status.
1071 */
1072 int
1073 amr_flush(struct amr_softc *sc)
1074 {
1075 struct amr_command *ac;
1076 int error;
1077
1078 /* get ourselves a command buffer */
1079 error = 1;
1080 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1081 ac = amr_alloccmd(sc);
1082 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1083 if (ac == NULL)
1084 goto out;
1085 /* set command flags */
1086 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1087
1088 /* build the command proper */
1089 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1090
1091 /* we have to poll, as the system may be going down or otherwise damaged */
1092 if (sc->amr_poll_command(ac))
1093 goto out;
1094 error = ac->ac_status;
1095
1096 out:
1097 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1098 if (ac != NULL)
1099 amr_releasecmd(ac);
1100 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1101 return(error);
1102 }
1103
1104 /********************************************************************************
1105 * Detect extended CDB support (CDBs greater than 10 bytes).
1106 * Returns 1 if the controller supports it,
1107 * 0 if it does not.
1108 */
1109 static int
1110 amr_support_ext_cdb(struct amr_softc *sc)
1111 {
1112 struct amr_command *ac;
1113 u_int8_t *mbox;
1114 int error;
1115
1116 /* get ourselves a command buffer */
1117 error = 0;
1118 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1119 ac = amr_alloccmd(sc);
1120 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1121 if (ac == NULL)
1122 goto out;
1123 /* set command flags */
1124 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1125
1126 /* build the command proper */
1127 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1128 mbox[0] = 0xA4;
1129 mbox[2] = 0x16;
1130
1131
1132 /* we have to poll, as the system may be going down or otherwise damaged */
1133 if (sc->amr_poll_command(ac))
1134 goto out;
1135 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1136 error = 1;
1137 }
1138
1139 out:
1140 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1141 if (ac != NULL)
1142 amr_releasecmd(ac);
1143 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1144 return(error);
1145 }
1146
1147 /********************************************************************************
1148 * Try to find I/O work for the controller from one or more of the work queues.
1149 *
1150 * We make the assumption that if the controller is not ready to take a command
1151 * at some given time, it will generate an interrupt at some later time when
1152 * it is.
1153 */
1154 void
1155 amr_startio(struct amr_softc *sc)
1156 {
1157 struct amr_command *ac;
1158
1159 /* spin until something prevents us from doing any work */
1160 for (;;) {
1161
1162 /* Don't bother to queue commands if no bounce buffers are available. */
1163 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1164 break;
1165
1166 /* try to get a ready command */
1167 ac = amr_dequeue_ready(sc);
1168
1169 /* if that failed, build a command from a bio */
1170 if (ac == NULL)
1171 (void)amr_bio_command(sc, &ac);
1172
1173 /* if that failed, build a command from a ccb */
1174 if ((ac == NULL) && (sc->amr_cam_command != NULL))
1175 sc->amr_cam_command(sc, &ac);
1176
1177 /* if we don't have anything to do, give up */
1178 if (ac == NULL)
1179 break;
1180
1181 /* try to give the command to the controller; if this fails save it for later and give up */
1182 if (amr_start(ac)) {
1183 debug(2, "controller busy, command deferred");
1184 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1185 break;
1186 }
1187 }
1188 }
1189
1190 /********************************************************************************
1191 * Handle completion of an I/O command.
1192 */
1193 static void
1194 amr_completeio(struct amr_command *ac)
1195 {
1196 struct amr_softc *sc = ac->ac_sc;
1197 static struct timeval lastfail;
1198 static int curfail;
1199 struct buf *bp = ac->ac_bio->bio_buf;
1200
1201 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1202 bp->b_error = EIO;
1203 bp->b_flags |= B_ERROR;
1204
1205 if (ppsratecheck(&lastfail, &curfail, 1))
1206 device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
1207 /* amr_printcommand(ac);*/
1208 }
1209 amrd_intr(ac->ac_bio);
1210 lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
1211 amr_releasecmd(ac);
1212 lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
1213 }
1214
1215 /********************************************************************************
1216 ********************************************************************************
1217 Command Processing
1218 ********************************************************************************
1219 ********************************************************************************/
1220
1221 /********************************************************************************
1222 * Convert a bio off the top of the bio queue into a command.
1223 */
1224 static int
1225 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1226 {
1227 struct amr_command *ac;
1228 struct amrd_softc *amrd;
1229 struct bio *bio;
1230 struct buf *bp;
1231 int error;
1232 int blkcount;
1233 int driveno;
1234 int cmd;
1235
1236 ac = NULL;
1237 error = 0;
1238
1239 /* get a command */
1240 if ((ac = amr_alloccmd(sc)) == NULL)
1241 return (ENOMEM);
1242
1243 /* get a bio to work on */
1244 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1245 amr_releasecmd(ac);
1246 return (0);
1247 }
1248
1249 /* connect the bio to the command */
1250 bp = bio->bio_buf;
1251 ac->ac_complete = amr_completeio;
1252 ac->ac_bio = bio;
1253 ac->ac_data = bp->b_data;
1254 ac->ac_length = bp->b_bcount;
1255 cmd = 0;
1256 switch (bp->b_cmd) {
1257 case BUF_CMD_READ:
1258 ac->ac_flags |= AMR_CMD_DATAIN;
1259 if (AMR_IS_SG64(sc)) {
1260 cmd = AMR_CMD_LREAD64;
1261 ac->ac_flags |= AMR_CMD_SG64;
1262 } else
1263 cmd = AMR_CMD_LREAD;
1264 break;
1265 case BUF_CMD_WRITE:
1266 ac->ac_flags |= AMR_CMD_DATAOUT;
1267 if (AMR_IS_SG64(sc)) {
1268 cmd = AMR_CMD_LWRITE64;
1269 ac->ac_flags |= AMR_CMD_SG64;
1270 } else
1271 cmd = AMR_CMD_LWRITE;
1272 break;
1273 case BUF_CMD_FLUSH:
1274 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1275 cmd = AMR_CMD_FLUSH;
1276 break;
1277 default:
1278 panic("Invalid bio command");
1279 }
1280 amrd = (struct amrd_softc *)bio->bio_driver_info;
1281 driveno = amrd->amrd_drive - sc->amr_drive;
1282 blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1283
1284 ac->ac_mailbox.mb_command = cmd;
1285 if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
1286 ac->ac_mailbox.mb_blkcount = blkcount;
1287 ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
1288 if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
1289 device_printf(sc->amr_dev,
1290 "I/O beyond end of unit (%lld,%d > %lu)\n",
1291 (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
1292 (u_long)sc->amr_drive[driveno].al_size);
1293 }
1294 }
1295 ac->ac_mailbox.mb_drive = driveno;
1296 if (sc->amr_state & AMR_STATE_REMAP_LD)
1297 ac->ac_mailbox.mb_drive |= 0x80;
1298
1299 /* we fill in the s/g related data when the command is mapped */
1300
1301
1302 *acp = ac;
1303 return(error);
1304 }
1305
1306 /********************************************************************************
1307 * Take a command, submit it to the controller and sleep until it completes
1308 * or fails. Interrupts must be enabled, returns nonzero on error.
1309 */
1310 static int
1311 amr_wait_command(struct amr_command *ac)
1312 {
1313 int error = 0;
1314 struct amr_softc *sc = ac->ac_sc;
1315
1316 debug_called(1);
1317
1318 ac->ac_complete = NULL;
1319 ac->ac_flags |= AMR_CMD_SLEEP;
1320 if ((error = amr_start(ac)) != 0) {
1321 return(error);
1322 }
1323
1324 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1325 error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0);
1326 }
1327
1328 return(error);
1329 }
1330
1331 /********************************************************************************
1332 * Take a command, submit it to the controller and busy-wait for it to return.
1333 * Returns nonzero on error. Can be safely called with interrupts enabled.
1334 */
1335 static int
1336 amr_std_poll_command(struct amr_command *ac)
1337 {
1338 struct amr_softc *sc = ac->ac_sc;
1339 int error, count;
1340
1341 debug_called(2);
1342
1343 ac->ac_complete = NULL;
1344 if ((error = amr_start(ac)) != 0)
1345 return(error);
1346
1347 count = 0;
1348 do {
1349 /*
1350 * Poll for completion, although the interrupt handler may beat us to it.
1351 * Note that the timeout here is somewhat arbitrary.
1352 */
1353 amr_done(sc);
1354 DELAY(1000);
1355 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
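/* at DELAY(1000) per pass, 1000 passes bounds the busy-wait to roughly one second */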
1356 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1357 error = 0;
1358 } else {
1359 /* XXX the slot is now marked permanently busy */
1360 error = EIO;
1361 device_printf(sc->amr_dev, "polled command timeout\n");
1362 }
1363 return(error);
1364 }
1365
1366 static void
1367 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1368 {
1369 struct amr_command *ac = arg;
1370 struct amr_softc *sc = ac->ac_sc;
1371 int mb_channel;
1372
1373 if (err) {
1374 device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1375 ac->ac_status = AMR_STATUS_ABORTED;
1376 return;
1377 }
1378
1379 amr_setup_sg(arg, segs, nsegs, err);
1380
1381 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1382 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1383 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1384 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1385 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1386 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1387
1388 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1389 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1390 if (AC_IS_SG64(ac)) {
1391 ac->ac_sg64_hi = 0;
1392 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1393 }
1394
1395 sc->amr_poll_command1(sc, ac);
1396 }
1397
1398 /********************************************************************************
1399 * Take a command, submit it to the controller and busy-wait for it to return.
1400 * Returns nonzero on error. Can be safely called with interrupts enabled.
1401 */
1402 static int
1403 amr_quartz_poll_command(struct amr_command *ac)
1404 {
1405 struct amr_softc *sc = ac->ac_sc;
1406 int error;
1407
1408 debug_called(2);
1409
1410 error = 0;
1411
1412 if (AC_IS_SG64(ac)) {
1413 ac->ac_tag = sc->amr_buffer64_dmat;
1414 ac->ac_datamap = ac->ac_dma64map;
1415 } else {
1416 ac->ac_tag = sc->amr_buffer_dmat;
1417 ac->ac_datamap = ac->ac_dmamap;
1418 }
1419
1420 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1421 if (ac->ac_data != NULL && ac->ac_length != 0) {
1422 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1423 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1424 error = 1;
1425 }
1426 } else {
1427 error = amr_quartz_poll_command1(sc, ac);
1428 }
1429
1430 return (error);
1431 }
1432
1433 static int
1434 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1435 {
1436 int count, error;
1437
1438 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
1439 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1440 count=0;
1441 while (sc->amr_busyslots) {
1442 lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
1443 if(count++>10) {
1444 break;
1445 }
1446 }
1447
1448 if(sc->amr_busyslots) {
1449 device_printf(sc->amr_dev, "adapter is busy\n");
1450 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1451 if (ac->ac_data != NULL) {
1452 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1453 }
1454 ac->ac_status=0;
1455 return(1);
1456 }
1457 }
1458
1459 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1460
1461 /* clear the poll/ack fields in the mailbox */
1462 sc->amr_mailbox->mb_ident = 0xFE;
1463 sc->amr_mailbox->mb_nstatus = 0xFF;
1464 sc->amr_mailbox->mb_status = 0xFF;
1465 sc->amr_mailbox->mb_poll = 0;
1466 sc->amr_mailbox->mb_ack = 0;
1467 sc->amr_mailbox->mb_busy = 1;
1468
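/*
 * The firmware is expected to overwrite the 0xFF sentinels above once it
 * posts status; busy-wait on them, then complete the 0x77 poll/ack
 * handshake before issuing AMR_QIDB_ACK.
 */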
1469 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1470
1471 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1472 DELAY(1);
1473 while(sc->amr_mailbox->mb_status == 0xFF)
1474 DELAY(1);
1475 ac->ac_status=sc->amr_mailbox->mb_status;
1476 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1477 while(sc->amr_mailbox->mb_poll != 0x77)
1478 DELAY(1);
1479 sc->amr_mailbox->mb_poll = 0;
1480 sc->amr_mailbox->mb_ack = 0x77;
1481
1482 /* acknowledge that we have the commands */
1483 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1484 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1485 DELAY(1);
1486 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
1487
1488 /* unmap the command's data buffer */
1489 if (ac->ac_flags & AMR_CMD_DATAIN) {
1490 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1491 }
1492 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1493 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1494 }
1495 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1496
1497 return(error);
1498 }
1499
1500 static __inline int
1501 amr_freeslot(struct amr_command *ac)
1502 {
1503 struct amr_softc *sc = ac->ac_sc;
1504 int slot;
1505
1506 debug_called(3);
1507
1508 slot = ac->ac_slot;
1509 if (sc->amr_busycmd[slot] == NULL)
1510 panic("amr: slot %d not busy?", slot);
1511
1512 sc->amr_busycmd[slot] = NULL;
1513 atomic_subtract_int(&sc->amr_busyslots, 1);
1514
1515 return (0);
1516 }
1517
1518 /********************************************************************************
1519 * Map/unmap (ac)'s data in the controller's addressable space as required.
1520 *
1521 * These functions may be safely called multiple times on a given command.
1522 */
1523 static void
1524 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1525 {
1526 struct amr_command *ac = (struct amr_command *)arg;
1527 struct amr_sgentry *sg;
1528 struct amr_sg64entry *sg64;
1529 int flags, i;
1530
1531 debug_called(3);
1532
1533 /* get base address of s/g table */
1534 sg = ac->ac_sg.sg32;
1535 sg64 = ac->ac_sg.sg64;
1536
1537 if (AC_IS_SG64(ac)) {
1538 ac->ac_nsegments = nsegments;
1539 ac->ac_mb_physaddr = 0xffffffff;
1540 for (i = 0; i < nsegments; i++, sg64++) {
1541 sg64->sg_addr = segs[i].ds_addr;
1542 sg64->sg_count = segs[i].ds_len;
1543 }
1544 } else {
1545 /* decide whether we need to populate the s/g table */
1546 if (nsegments < 2) {
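/* a single segment is passed directly via mb_physaddr; the s/g table is not used */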
1547 ac->ac_nsegments = 0;
1548 ac->ac_mb_physaddr = segs[0].ds_addr;
1549 } else {
1550 ac->ac_nsegments = nsegments;
1551 ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1552 for (i = 0; i < nsegments; i++, sg++) {
1553 sg->sg_addr = segs[i].ds_addr;
1554 sg->sg_count = segs[i].ds_len;
1555 }
1556 }
1557 }
1558
1559 flags = 0;
1560 if (ac->ac_flags & AMR_CMD_DATAIN)
1561 flags |= BUS_DMASYNC_PREREAD;
1562 if (ac->ac_flags & AMR_CMD_DATAOUT)
1563 flags |= BUS_DMASYNC_PREWRITE;
1564 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1565 ac->ac_flags |= AMR_CMD_MAPPED;
1566 }
1567
1568 static void
1569 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1570 {
1571 struct amr_command *ac = arg;
1572 struct amr_softc *sc = ac->ac_sc;
1573 int mb_channel;
1574
1575 if (err) {
1576 device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1577 amr_abort_load(ac);
1578 return;
1579 }
1580
1581 amr_setup_sg(arg, segs, nsegs, err);
1582
1583 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1584 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1585 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1586 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1587 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1588 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1589
1590 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1591 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1592 if (AC_IS_SG64(ac)) {
1593 ac->ac_sg64_hi = 0;
1594 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1595 }
1596
1597 if (sc->amr_submit_command(ac) == EBUSY) {
1598 amr_freeslot(ac);
1599 amr_requeue_ready(ac);
1600 }
1601 }
1602
1603 static void
1604 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1605 {
1606 struct amr_command *ac = arg;
1607 struct amr_softc *sc = ac->ac_sc;
1608 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1609 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1610
1611 if (err) {
1612 device_printf(sc->amr_dev, "error %d in %s", err, __func__);
1613 amr_abort_load(ac);
1614 return;
1615 }
1616
1617 /* Set up the mailbox portion of the command to point at the ccb */
1618 ac->ac_mailbox.mb_nsgelem = 0;
1619 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1620
1621 amr_setup_sg(arg, segs, nsegs, err);
1622
1623 switch (ac->ac_mailbox.mb_command) {
1624 case AMR_CMD_EXTPASS:
1625 aep->ap_no_sg_elements = ac->ac_nsegments;
1626 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1627 break;
1628 case AMR_CMD_PASS:
1629 ap->ap_no_sg_elements = ac->ac_nsegments;
1630 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1631 break;
1632 default:
1633 panic("Unknown ccb command");
1634 }
1635
1636 if (sc->amr_submit_command(ac) == EBUSY) {
1637 amr_freeslot(ac);
1638 amr_requeue_ready(ac);
1639 }
1640 }
1641
1642 static int
1643 amr_mapcmd(struct amr_command *ac)
1644 {
1645 bus_dmamap_callback_t *cb;
1646 struct amr_softc *sc = ac->ac_sc;
1647
1648 debug_called(3);
1649
1650 if (AC_IS_SG64(ac)) {
1651 ac->ac_tag = sc->amr_buffer64_dmat;
1652 ac->ac_datamap = ac->ac_dma64map;
1653 } else {
1654 ac->ac_tag = sc->amr_buffer_dmat;
1655 ac->ac_datamap = ac->ac_dmamap;
1656 }
1657
1658 if (ac->ac_flags & AMR_CMD_CCB)
1659 cb = amr_setup_ccb;
1660 else
1661 cb = amr_setup_data;
1662
1663 /* if the command involves data at all, and hasn't been mapped */
1664 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1665 /* map the data buffers into bus space and build the s/g list */
1666 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1667 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1668 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1669 }
1670 } else {
1671 if (sc->amr_submit_command(ac) == EBUSY) {
1672 amr_freeslot(ac);
1673 amr_requeue_ready(ac);
1674 }
1675 }
1676
1677 return (0);
1678 }
1679
1680 static void
1681 amr_unmapcmd(struct amr_command *ac)
1682 {
1683 int flag;
1684
1685 debug_called(3);
1686
1687 /* if the command involved data at all and was mapped */
1688 if (ac->ac_flags & AMR_CMD_MAPPED) {
1689
1690 if (ac->ac_data != NULL) {
1691
1692 flag = 0;
1693 if (ac->ac_flags & AMR_CMD_DATAIN)
1694 flag |= BUS_DMASYNC_POSTREAD;
1695 if (ac->ac_flags & AMR_CMD_DATAOUT)
1696 flag |= BUS_DMASYNC_POSTWRITE;
1697
1698 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1699 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1700 }
1701
1702 ac->ac_flags &= ~AMR_CMD_MAPPED;
1703 }
1704 }
1705
1706 static void
1707 amr_abort_load(struct amr_command *ac)
1708 {
1709 ac_qhead_t head;
1710 struct amr_softc *sc = ac->ac_sc;
1711
1712 KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);
1713
1714 ac->ac_status = AMR_STATUS_ABORTED;
1715 amr_init_qhead(&head);
1716 amr_enqueue_completed(ac, &head);
1717
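/* amr_complete() takes amr_list_lock itself, so drop it across the call */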
1718 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1719 amr_complete(sc, &head);
1720 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1721 }
1722
1723 /********************************************************************************
1724 * Take a command and give it to the controller, returns 0 if successful, or
1725 * EBUSY if the command should be retried later.
1726 */
1727 static int
1728 amr_start(struct amr_command *ac)
1729 {
1730 struct amr_softc *sc;
1731 int error = 0;
1732 int slot;
1733
1734 debug_called(3);
1735
1736 /* mark command as busy so that polling consumer can tell */
1737 sc = ac->ac_sc;
1738 ac->ac_flags |= AMR_CMD_BUSY;
1739
1740 /* get a command slot (freed in amr_done) */
1741 slot = ac->ac_slot;
1742 if (sc->amr_busycmd[slot] != NULL)
1743 panic("amr: slot %d busy?", slot);
1744 sc->amr_busycmd[slot] = ac;
1745 atomic_add_int(&sc->amr_busyslots, 1);
1746
1747 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1748 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1749 /*
1750 * Memory resources are short, so free the slot and let this be tried
1751 * later.
1752 */
1753 amr_freeslot(ac);
1754 }
1755
1756 return (error);
1757 }
1758
1759 /********************************************************************************
1760 * Extract one or more completed commands from the controller (sc)
1761 *
1762 * Returns nonzero if any commands on the work queue were marked as completed.
1763 */
1764
1765 int
1766 amr_done(struct amr_softc *sc)
1767 {
1768 ac_qhead_t head;
1769 struct amr_command *ac;
1770 struct amr_mailbox mbox;
1771 int i, idx, result;
1772
1773 debug_called(3);
1774
1775 /* See if there's anything for us to do */
1776 result = 0;
1777 amr_init_qhead(&head);
1778
1779 /* loop collecting completed commands */
1780 for (;;) {
1781 /* poll for a completed command's identifier and status */
1782 if (sc->amr_get_work(sc, &mbox)) {
1783 result = 1;
1784
1785 /* iterate over completed commands in this result */
1786 for (i = 0; i < mbox.mb_nstatus; i++) {
1787 /* get pointer to busy command */
1788 idx = mbox.mb_completed[i] - 1;
1789 ac = sc->amr_busycmd[idx];
1790
1791 /* really a busy command? */
1792 if (ac != NULL) {
1793
1794 /* pull the command from the busy index */
1795 amr_freeslot(ac);
1796
1797 /* save status for later use */
1798 ac->ac_status = mbox.mb_status;
1799 amr_enqueue_completed(ac, &head);
1800 debug(3, "completed command with status %x", mbox.mb_status);
1801 } else {
1802 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1803 }
1804 }
1805 } else
1806 break; /* no work */
1807 }
1808
1809 /* handle completion and timeouts */
1810 amr_complete(sc, &head);
1811
1812 return(result);
1813 }
1814
1815 /********************************************************************************
1816 * Do completion processing on done commands on (sc)
1817 */
1818
1819 static void
1820 amr_complete(void *context, ac_qhead_t *head)
1821 {
1822 struct amr_softc *sc = (struct amr_softc *)context;
1823 struct amr_command *ac;
1824
1825 debug_called(3);
1826
1827 /* pull completed commands off the queue */
1828 for (;;) {
1829 ac = amr_dequeue_completed(sc, head);
1830 if (ac == NULL)
1831 break;
1832
1833 /* unmap the command's data buffer */
1834 amr_unmapcmd(ac);
1835
1836 /*
1837 * Is there a completion handler?
1838 */
1839 if (ac->ac_complete != NULL) {
1840 /* unbusy the command */
1841 ac->ac_flags &= ~AMR_CMD_BUSY;
1842 ac->ac_complete(ac);
1843
1844 /*
1845 * Is someone sleeping on this one?
1846 */
1847 } else {
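/* clear BUSY and deliver the wakeup under the list lock so a sleeping waiter cannot miss it */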
1848 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1849 ac->ac_flags &= ~AMR_CMD_BUSY;
1850 if (ac->ac_flags & AMR_CMD_SLEEP) {
1851 /* wake up the sleeper */
1852 wakeup(ac);
1853 }
1854 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1855 }
1856
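/* if that was the last outstanding command, wake anyone waiting for the controller to go idle */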
1857 if(!sc->amr_busyslots) {
1858 wakeup(sc);
1859 }
1860 }
1861
1862 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1863 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1864 amr_startio(sc);
1865 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1866 }
1867
1868 /********************************************************************************
1869 ********************************************************************************
1870 Command Buffer Management
1871 ********************************************************************************
1872 ********************************************************************************/
1873
1874 /********************************************************************************
1875 * Get a new command buffer.
1876 *
1877 * This may return NULL in low-memory cases.
1878 *
1879 * If possible, we recycle a command buffer that's been used before.
1880 */
1881 struct amr_command *
1882 amr_alloccmd(struct amr_softc *sc)
1883 {
1884 struct amr_command *ac;
1885
1886 debug_called(3);
1887
1888 ac = amr_dequeue_free(sc);
1889 if (ac == NULL) {
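/* no free command buffers; freeze the queue until a completion returns one */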
1890 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1891 return(NULL);
1892 }
1893
1894 /* clear out significant fields */
1895 ac->ac_status = 0;
1896 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1897 ac->ac_flags = 0;
1898 ac->ac_bio = NULL;
1899 ac->ac_data = NULL;
1900 ac->ac_complete = NULL;
1901 ac->ac_retries = 0;
1902 ac->ac_tag = NULL;
1903 ac->ac_datamap = NULL;
1904 return(ac);
1905 }
1906
1907 /********************************************************************************
1908 * Release a command buffer for recycling.
1909 */
1910 void
1911 amr_releasecmd(struct amr_command *ac)
1912 {
1913 debug_called(3);
1914
1915 amr_enqueue_free(ac);
1916 }
1917
1918 /********************************************************************************
1919 * Allocate a new command cluster and initialise it.
1920 */
1921 static void
1922 amr_alloccmd_cluster(struct amr_softc *sc)
1923 {
1924 struct amr_command_cluster *acc;
1925 struct amr_command *ac;
1926 int i, nextslot;
1927
1928 /*
1929 * If we haven't found the real limit yet, let us have a couple of
1930 * commands in order to be able to probe.
1931 */
1932 if (sc->amr_maxio == 0)
1933 sc->amr_maxio = 2;
1934
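/* the command slots are already fully populated; nothing more to add */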
1935 if (sc->amr_nextslot > sc->amr_maxio)
1936 return;
1937 acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1938 if (acc != NULL) {
1939 nextslot = sc->amr_nextslot;
1940 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
1941 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1942 lockmgr(&sc->amr_list_lock, LK_RELEASE);
1943 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1944 ac = &acc->acc_command[i];
1945 ac->ac_sc = sc;
1946 ac->ac_slot = nextslot;
1947
1948 /*
1949 * The SG table for each slot is a fixed size and is assumed
1950 * to hold 64-bit s/g objects when the driver is configured to do
1951 * 64-bit DMA. 32-bit DMA commands still use the same table, but
1952 * cast down to 32-bit objects.
1953 */
1954 if (AMR_IS_SG64(sc)) {
1955 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1956 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1957 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1958 } else {
1959 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1960 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1961 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1962 }
1963
1964 ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1965 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
1966 (ac->ac_slot * sizeof(union amr_ccb));
1967
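/* if DMA maps cannot be created for this slot, stop populating the cluster here */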
1968 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
1969 break;
1970 if (AMR_IS_SG64(sc) &&
1971 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
1972 break;
1973 amr_releasecmd(ac);
1974 if (++nextslot > sc->amr_maxio)
1975 break;
1976 }
1977 sc->amr_nextslot = nextslot;
1978 }
1979 }
1980
1981 /********************************************************************************
1982 * Free a command cluster
1983 */
1984 static void
1985 amr_freecmd_cluster(struct amr_command_cluster *acc)
1986 {
1987 struct amr_softc *sc = acc->acc_command[0].ac_sc;
1988 int i;
1989
1990 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
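/* slots past the populated part of the cluster were never initialised (ac_sc is still NULL from M_ZERO) */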
1991 if (acc->acc_command[i].ac_sc == NULL)
1992 break;
1993 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
1994 if (AMR_IS_SG64(sc))
1995 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
1996 }
1997 kfree(acc, M_AMR);
1998 }
1999
2000 /********************************************************************************
2001 ********************************************************************************
2002 Interface-specific Shims
2003 ********************************************************************************
2004 ********************************************************************************/
2005
2006 /********************************************************************************
2007 * Tell the controller that the mailbox contains a valid command
2008 */
2009 static int
2010 amr_quartz_submit_command(struct amr_command *ac)
2011 {
2012 struct amr_softc *sc = ac->ac_sc;
2013 static struct timeval lastfail;
2014 static int curfail;
2015 int i = 0;
2016
2017 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
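/* give the controller a short window (up to ~10us) to release the mailbox */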
2018 while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2019 DELAY(1);
2020 /* This is a no-op read that flushes pending mailbox updates */
2021 AMR_QGET_ODB(sc);
2022 }
2023 if (sc->amr_mailbox->mb_busy) {
2024 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2025 if (ac->ac_retries++ > 1000) {
2026 if (ppsratecheck(&lastfail, &curfail, 1))
2027 device_printf(sc->amr_dev, "Too many retries on command %p. "
2028 "Controller is likely dead\n", ac);
2029 ac->ac_retries = 0;
2030 }
2031 return (EBUSY);
2032 }
2033
2034 /*
2035 * Save the slot number so that we can locate this command when complete.
2036 * Note that ident = 0 seems to be special, so we don't use it.
2037 */
2038 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2039 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2040 sc->amr_mailbox->mb_busy = 1;
2041 sc->amr_mailbox->mb_poll = 0;
2042 sc->amr_mailbox->mb_ack = 0;
2043 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2044 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2045
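/* hand the controller the physical address of the mailbox and ring the submit doorbell */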
2046 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2047 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2048 return(0);
2049 }
2050
2051 static int
2052 amr_std_submit_command(struct amr_command *ac)
2053 {
2054 struct amr_softc *sc = ac->ac_sc;
2055 static struct timeval lastfail;
2056 static int curfail;
2057
2058 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
2059 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2060 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2061 if (ac->ac_retries++ > 1000) {
2062 if (ppsratecheck(&lastfail, &curfail, 1))
2063 device_printf(sc->amr_dev, "Too many retries on command %p. "
2064 "Controller is likely dead\n", ac);
2065 ac->ac_retries = 0;
2066 }
2067 return (EBUSY);
2068 }
2069
2070 /*
2071 * Save the slot number so that we can locate this command when complete.
2072 * Note that ident = 0 seems to be special, so we don't use it.
2073 */
2074 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2075 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2076 sc->amr_mailbox->mb_busy = 1;
2077 sc->amr_mailbox->mb_poll = 0;
2078 sc->amr_mailbox->mb_ack = 0;
2079
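/* tell the controller to pick up the new mailbox contents */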
2080 AMR_SPOST_COMMAND(sc);
2081 lockmgr(&sc->amr_hw_lock, LK_RELEASE);
2082 return(0);
2083 }
2084
2085 /********************************************************************************
2086 * Claim any work that the controller has completed; acknowledge completion,
2087 * save details of the completion in (mbsave)
2088 */
2089 static int
2090 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2091 {
2092 int worked, i;
2093 u_int32_t outd;
2094 u_int8_t nstatus;
2095 u_int8_t completed[46];
2096
2097 debug_called(3);
2098
2099 worked = 0;
2100
2101 /* work waiting for us? */
2102 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2103
2104 /* acknowledge interrupt */
2105 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2106
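/* wait for the firmware to post the completion count, then reset the sentinel for the next interrupt */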
2107 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2108 DELAY(1);
2109 sc->amr_mailbox->mb_nstatus = 0xff;
2110
2111 /* wait until the firmware has written out all completions */
2112 for (i = 0; i < nstatus; i++) {
2113 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2114 DELAY(1);
2115 sc->amr_mailbox->mb_completed[i] = 0xff;
2116 }
2117
2118 /* Save information for later processing */
2119 mbsave->mb_nstatus = nstatus;
2120 mbsave->mb_status = sc->amr_mailbox->mb_status;
2121 sc->amr_mailbox->mb_status = 0xff;
2122
2123 for (i = 0; i < nstatus; i++)
2124 mbsave->mb_completed[i] = completed[i];
2125
2126 /* acknowledge that we have the commands */
2127 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2128
2129 #if 0
2130 #ifndef AMR_QUARTZ_GOFASTER
2131 /*
2132 * This waits for the controller to notice that we've taken the
2133 * command from it. It's very inefficient, and we shouldn't do it,
2134 * but if we remove this code, we stop completing commands under
2135 * load.
2136 *
2137 * Peter J says we shouldn't do this. The documentation says we
2138 * should. Who is right?
2139 */
2140 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2141 ; /* XXX aiee! what if it dies? */
2142 #endif
2143 #endif
2144
2145 worked = 1; /* got some work */
2146 }
2147
2148 return(worked);
2149 }
2150
2151 static int
2152 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2153 {
2154 int worked;
2155 u_int8_t istat;
2156
2157 debug_called(3);
2158
2159 worked = 0;
2160
2161 /* check for valid interrupt status */
2162 istat = AMR_SGET_ISTAT(sc);
2163 if ((istat & AMR_SINTR_VALID) != 0) {
2164 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2165
2166 /* save mailbox, which contains a list of completed commands */
2167 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2168
2169 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2170 worked = 1;
2171 }
2172
2173 return(worked);
2174 }
2175
2176 /********************************************************************************
2177 * Notify the controller of the mailbox location.
2178 */
2179 static void
2180 amr_std_attach_mailbox(struct amr_softc *sc)
2181 {
2182
2183 /* program the mailbox physical address */
2184 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2185 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2186 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2187 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2188 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2189
2190 /* clear any outstanding interrupt and enable interrupts proper */
2191 AMR_SACK_INTERRUPT(sc);
2192 AMR_SENABLE_INTR(sc);
2193 }
2194
2195 #ifdef AMR_BOARD_INIT
2196 /********************************************************************************
2197 * Initialise the controller
2198 */
2199 static int
2200 amr_quartz_init(struct amr_softc *sc)
2201 {
2202 int status, ostatus;
2203
2204 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2205
2206 AMR_QRESET(sc);
2207
2208 ostatus = 0xff;
2209 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2210 if (status != ostatus) {
2211 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2212 ostatus = status;
2213 }
2214 switch (status) {
2215 case AMR_QINIT_NOMEM:
2216 return(ENOMEM);
2217
2218 case AMR_QINIT_SCAN:
2219 /* XXX we could print channel/target here */
2220 break;
2221 }
2222 }
2223 return(0);
2224 }
2225
2226 static int
2227 amr_std_init(struct amr_softc *sc)
2228 {
2229 int status, ostatus;
2230
2231 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2232
2233 AMR_SRESET(sc);
2234
2235 ostatus = 0xff;
2236 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2237 if (status != ostatus) {
2238 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2239 ostatus = status;
2240 }
2241 switch (status) {
2242 case AMR_SINIT_NOMEM:
2243 return(ENOMEM);
2244
2245 case AMR_SINIT_INPROG:
2246 /* XXX we could print channel/target here? */
2247 break;
2248 }
2249 }
2250 return(0);
2251 }
2252 #endif
2253
2254 /********************************************************************************
2255 ********************************************************************************
2256 Debugging
2257 ********************************************************************************
2258 ********************************************************************************/
2259
2260 /********************************************************************************
2261 * Identify the controller and print some information about it.
2262 */
2263 static void
2264 amr_describe_controller(struct amr_softc *sc)
2265 {
2266 struct amr_prodinfo *ap;
2267 struct amr_enquiry *ae;
2268 char *prod;
2269 int status;
2270
2271 /*
2272 * Try to get 40LD product info, which tells us what the card is labelled as.
2273 */
2274 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2275 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2276 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2277 ap->ap_memsize);
2278
2279 kfree(ap, M_AMR);
2280 return;
2281 }
2282
2283 /*
2284 * Try the 8LD extended ENQUIRY to get the controller signature, and use a lookup table.
2285 */
2286 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2287 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2288
2289 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2290
2291 /*
2292 * Try to work it out based on the PCI signatures.
2293 */
2294 switch (pci_get_device(sc->amr_dev)) {
2295 case 0x9010:
2296 prod = "Series 428";
2297 break;
2298 case 0x9060:
2299 prod = "Series 434";
2300 break;
2301 default:
2302 prod = "unknown controller";
2303 break;
2304 }
2305 } else {
2306 device_printf(sc->amr_dev, "<unsupported controller>\n");
2307 return;
2308 }
2309
2310 /*
2311 * HP NetRaid controllers have a special encoding of the firmware and
2312 * BIOS versions. The AMI version seems to have it as strings whereas
2313 * the HP version does it with a leading uppercase character and two
2314 * binary numbers.
2315 */
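/* e.g. aa_firmware[] = { 2, 1, 'A' } is printed below as firmware version A.01.02 */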
2316
2317 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2318 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2319 ae->ae_adapter.aa_firmware[1] < ' ' &&
2320 ae->ae_adapter.aa_firmware[0] < ' ' &&
2321 ae->ae_adapter.aa_bios[2] >= 'A' &&
2322 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2323 ae->ae_adapter.aa_bios[1] < ' ' &&
2324 ae->ae_adapter.aa_bios[0] < ' ') {
2325
2326 /* this looks like we have an HP NetRaid version of the MegaRaid */
2327
2328 if(ae->ae_signature == AMR_SIG_438) {
2329 /* the AMI 438 is a NetRaid 3si in HP-land */
2330 prod = "HP NetRaid 3si";
2331 }
2332
2333 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2334 prod, ae->ae_adapter.aa_firmware[2],
2335 ae->ae_adapter.aa_firmware[1],
2336 ae->ae_adapter.aa_firmware[0],
2337 ae->ae_adapter.aa_bios[2],
2338 ae->ae_adapter.aa_bios[1],
2339 ae->ae_adapter.aa_bios[0],
2340 ae->ae_adapter.aa_memorysize);
2341 } else {
2342 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2343 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2344 ae->ae_adapter.aa_memorysize);
2345 }
2346 kfree(ae, M_AMR);
2347 }
2348
2349 int
2350 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2351 {
2352 struct amr_command *ac;
2353 int error = EIO;
2354
2355 debug_called(1);
2356
2357 sc->amr_state |= AMR_STATE_INTEN;
2358
2359 /* get ourselves a command buffer */
2360 if ((ac = amr_alloccmd(sc)) == NULL)
2361 goto out;
2362 /* set command flags */
2363 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2364
2365 /* point the command at our data */
2366 ac->ac_data = data;
2367 ac->ac_length = blks * AMR_BLKSIZE;
2368
2369 /* build the command proper */
2370 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2371 ac->ac_mailbox.mb_blkcount = blks;
2372 ac->ac_mailbox.mb_lba = lba;
2373 ac->ac_mailbox.mb_drive = unit;
2374
2375 /* can't assume that interrupts are going to work here, so play it safe */
2376 if (sc->amr_poll_command(ac))
2377 goto out;
2378 error = ac->ac_status;
2379
2380 out:
2381 if (ac != NULL)
2382 amr_releasecmd(ac);
2383
2384 sc->amr_state &= ~AMR_STATE_INTEN;
2385 return (error);
2386 }
2387
2388
2389
2390 #ifdef AMR_DEBUG
2391 /********************************************************************************
2392 * Print the command (ac) in human-readable format
2393 */
2394 #if 0
2395 static void
2396 amr_printcommand(struct amr_command *ac)
2397 {
2398 struct amr_softc *sc = ac->ac_sc;
2399 struct amr_sgentry *sg;
2400 int i;
2401
2402 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2403 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2404 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2405 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2406 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2407 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2408 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2409 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2410
2411 /* get base address of s/g table */
2412 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2413 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2414 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
2415 }
2416 #endif
2417 #endif
2418