1 /*- 2 * Copyright (c) 1999,2000 Michael Smith 3 * Copyright (c) 2000 BSDi 4 * Copyright (c) 2005 Scott Long 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002 Eric Moore 30 * Copyright (c) 2002, 2004 LSI Logic Corporation 31 * All rights reserved. 32 * 33 * Redistribution and use in source and binary forms, with or without 34 * modification, are permitted provided that the following conditions 35 * are met: 36 * 1. Redistributions of source code must retain the above copyright 37 * notice, this list of conditions and the following disclaimer. 38 * 2. 
Redistributions in binary form must reproduce the above copyright 39 * notice, this list of conditions and the following disclaimer in the 40 * documentation and/or other materials provided with the distribution. 41 * 3. The party using or redistributing the source code and binary forms 42 * agrees to the disclaimer below and the terms and conditions set forth 43 * herein. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 56 * 57 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.97 2012/04/20 20:27:31 jhb Exp $ 58 */ 59 60 /* 61 * Driver for the AMI MegaRaid family of controllers. 
62 */ 63 64 #include "opt_amr.h" 65 66 #include <sys/param.h> 67 #include <sys/systm.h> 68 #include <sys/malloc.h> 69 #include <sys/kernel.h> 70 #include <sys/proc.h> 71 #include <sys/sysctl.h> 72 #include <sys/sysmsg.h> 73 74 #include <sys/bio.h> 75 #include <sys/bus.h> 76 #include <sys/conf.h> 77 #include <sys/stat.h> 78 79 #include <machine/cpu.h> 80 #include <sys/rman.h> 81 82 #include <bus/pci/pcireg.h> 83 #include <bus/pci/pcivar.h> 84 85 #include <dev/raid/amr/amrio.h> 86 #include <dev/raid/amr/amrreg.h> 87 #include <dev/raid/amr/amrvar.h> 88 #define AMR_DEFINE_TABLES 89 #include <dev/raid/amr/amr_tables.h> 90 91 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters"); 92 93 static d_open_t amr_open; 94 static d_close_t amr_close; 95 static d_ioctl_t amr_ioctl; 96 97 static struct dev_ops amr_ops = { 98 { "amr", 0, 0 }, 99 .d_open = amr_open, 100 .d_close = amr_close, 101 .d_ioctl = amr_ioctl, 102 }; 103 104 int linux_no_adapter = 0; 105 /* 106 * Initialisation, bus interface. 107 */ 108 static void amr_startup(void *arg); 109 110 /* 111 * Command wrappers 112 */ 113 static int amr_query_controller(struct amr_softc *sc); 114 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize, 115 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status); 116 static void amr_completeio(struct amr_command *ac); 117 static int amr_support_ext_cdb(struct amr_softc *sc); 118 119 /* 120 * Command buffer allocation. 121 */ 122 static void amr_alloccmd_cluster(struct amr_softc *sc); 123 static void amr_freecmd_cluster(struct amr_command_cluster *acc); 124 125 /* 126 * Command processing. 
127 */ 128 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp); 129 static int amr_wait_command(struct amr_command *ac); 130 static int amr_mapcmd(struct amr_command *ac); 131 static void amr_unmapcmd(struct amr_command *ac); 132 static int amr_start(struct amr_command *ac); 133 static void amr_complete(void *context, ac_qhead_t *head); 134 static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error); 135 static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error); 136 static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); 137 static void amr_abort_load(struct amr_command *ac); 138 139 #if 0 140 /* 141 * Status monitoring 142 */ 143 static void amr_periodic(void *data); 144 #endif 145 146 /* 147 * Interface-specific shims 148 */ 149 static int amr_quartz_submit_command(struct amr_command *ac); 150 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); 151 static int amr_quartz_poll_command(struct amr_command *ac); 152 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac); 153 154 static int amr_std_submit_command(struct amr_command *ac); 155 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); 156 static int amr_std_poll_command(struct amr_command *ac); 157 static void amr_std_attach_mailbox(struct amr_softc *sc); 158 159 #ifdef AMR_BOARD_INIT 160 static int amr_quartz_init(struct amr_softc *sc); 161 static int amr_std_init(struct amr_softc *sc); 162 #endif 163 164 /* 165 * Debugging 166 */ 167 static void amr_describe_controller(struct amr_softc *sc); 168 #ifdef AMR_DEBUG 169 #if 0 170 static void amr_printcommand(struct amr_command *ac); 171 #endif 172 #endif 173 174 static void amr_init_sysctl(struct amr_softc *sc); 175 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, 176 int32_t flag, struct sysmsg *sm); 177 178 static MALLOC_DEFINE(M_AMR, "amr", 
"AMR memory"); 179 180 /******************************************************************************** 181 ******************************************************************************** 182 Inline Glue 183 ******************************************************************************** 184 ********************************************************************************/ 185 186 /******************************************************************************** 187 ******************************************************************************** 188 Public Interfaces 189 ******************************************************************************** 190 ********************************************************************************/ 191 192 /******************************************************************************** 193 * Initialise the controller and softc. 194 */ 195 int 196 amr_attach(struct amr_softc *sc) 197 { 198 device_t child; 199 200 debug_called(1); 201 202 /* 203 * Initialise per-controller queues. 204 */ 205 amr_init_qhead(&sc->amr_freecmds); 206 amr_init_qhead(&sc->amr_ready); 207 TAILQ_INIT(&sc->amr_cmd_clusters); 208 bioq_init(&sc->amr_bioq); 209 210 debug(2, "queue init done"); 211 212 /* 213 * Configure for this controller type. 214 */ 215 if (AMR_IS_QUARTZ(sc)) { 216 sc->amr_submit_command = amr_quartz_submit_command; 217 sc->amr_get_work = amr_quartz_get_work; 218 sc->amr_poll_command = amr_quartz_poll_command; 219 sc->amr_poll_command1 = amr_quartz_poll_command1; 220 } else { 221 sc->amr_submit_command = amr_std_submit_command; 222 sc->amr_get_work = amr_std_get_work; 223 sc->amr_poll_command = amr_std_poll_command; 224 amr_std_attach_mailbox(sc); 225 } 226 227 #ifdef AMR_BOARD_INIT 228 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))) 229 return(ENXIO); 230 #endif 231 232 /* 233 * Allocate initial commands. 234 */ 235 amr_alloccmd_cluster(sc); 236 237 /* 238 * Quiz controller for features and limits. 
239 */ 240 if (amr_query_controller(sc)) 241 return(ENXIO); 242 243 debug(2, "controller query complete"); 244 245 /* 246 * preallocate the remaining commands. 247 */ 248 while (sc->amr_nextslot < sc->amr_maxio) 249 amr_alloccmd_cluster(sc); 250 251 /* 252 * Setup sysctls. 253 */ 254 sysctl_ctx_init(&sc->amr_sysctl_ctx); 255 sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx, 256 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 257 device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, ""); 258 if (sc->amr_sysctl_tree == NULL) { 259 device_printf(sc->amr_dev, "can't add sysctl node\n"); 260 return (EINVAL); 261 } 262 amr_init_sysctl(sc); 263 264 /* 265 * Attach our 'real' SCSI channels to CAM. 266 */ 267 child = device_add_child(sc->amr_dev, "amrp", -1); 268 sc->amr_pass = child; 269 if (child != NULL) { 270 device_set_softc(child, sc); 271 device_set_desc(child, "SCSI Passthrough Bus"); 272 bus_generic_attach(sc->amr_dev); 273 } 274 275 /* 276 * Create the control device. 277 */ 278 sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR, 279 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev)); 280 sc->amr_dev_t->si_drv1 = sc; 281 linux_no_adapter++; 282 if (device_get_unit(sc->amr_dev) == 0) 283 make_dev_alias(sc->amr_dev_t, "megadev0"); 284 285 /* 286 * Schedule ourselves to bring the controller up once interrupts are 287 * available. 288 */ 289 bzero(&sc->amr_ich, sizeof(struct intr_config_hook)); 290 sc->amr_ich.ich_func = amr_startup; 291 sc->amr_ich.ich_arg = sc; 292 sc->amr_ich.ich_desc = "amr"; 293 if (config_intrhook_establish(&sc->amr_ich) != 0) { 294 device_printf(sc->amr_dev, "can't establish configuration hook\n"); 295 return(ENOMEM); 296 } 297 298 /* 299 * Print a little information about the controller. 
300 */ 301 amr_describe_controller(sc); 302 303 debug(2, "attach complete"); 304 return(0); 305 } 306 307 /******************************************************************************** 308 * Locate disk resources and attach children to them. 309 */ 310 static void 311 amr_startup(void *arg) 312 { 313 struct amr_softc *sc = (struct amr_softc *)arg; 314 struct amr_logdrive *dr; 315 int i, error; 316 317 debug_called(1); 318 callout_init(&sc->amr_timeout); 319 320 /* pull ourselves off the intrhook chain */ 321 if (sc->amr_ich.ich_func) 322 config_intrhook_disestablish(&sc->amr_ich); 323 sc->amr_ich.ich_func = NULL; 324 325 /* get up-to-date drive information */ 326 if (amr_query_controller(sc)) { 327 device_printf(sc->amr_dev, "can't scan controller for drives\n"); 328 return; 329 } 330 331 /* iterate over available drives */ 332 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) { 333 /* are we already attached to this drive? */ 334 if (dr->al_disk == 0) { 335 /* generate geometry information */ 336 if (dr->al_size > 0x200000) { /* extended translation? */ 337 dr->al_heads = 255; 338 dr->al_sectors = 63; 339 } else { 340 dr->al_heads = 64; 341 dr->al_sectors = 32; 342 } 343 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors); 344 345 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1); 346 if (dr->al_disk == 0) 347 device_printf(sc->amr_dev, "device_add_child failed\n"); 348 device_set_ivars(dr->al_disk, dr); 349 } 350 } 351 352 if ((error = bus_generic_attach(sc->amr_dev)) != 0) 353 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error); 354 355 /* mark controller back up */ 356 sc->amr_state &= ~AMR_STATE_SHUTDOWN; 357 358 /* interrupts will be enabled before we do anything more */ 359 sc->amr_state |= AMR_STATE_INTEN; 360 361 #if 0 362 /* 363 * Start the timeout routine. 
364 */ 365 sc->amr_timeout = timeout(amr_periodic, sc, hz); 366 #endif 367 368 return; 369 } 370 371 static void 372 amr_init_sysctl(struct amr_softc *sc) 373 { 374 375 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx, 376 SYSCTL_CHILDREN(sc->amr_sysctl_tree), 377 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0, 378 ""); 379 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx, 380 SYSCTL_CHILDREN(sc->amr_sysctl_tree), 381 OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0, 382 ""); 383 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx, 384 SYSCTL_CHILDREN(sc->amr_sysctl_tree), 385 OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0, 386 ""); 387 SYSCTL_ADD_INT(&sc->amr_sysctl_ctx, 388 SYSCTL_CHILDREN(sc->amr_sysctl_tree), 389 OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0, 390 ""); 391 } 392 393 394 /******************************************************************************* 395 * Free resources associated with a controller instance 396 */ 397 void 398 amr_free(struct amr_softc *sc) 399 { 400 struct amr_command_cluster *acc; 401 402 /* detach from CAM */ 403 if (sc->amr_pass != NULL) 404 device_delete_child(sc->amr_dev, sc->amr_pass); 405 406 /* cancel status timeout */ 407 callout_stop(&sc->amr_timeout); 408 409 /* throw away any command buffers */ 410 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) { 411 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link); 412 amr_freecmd_cluster(acc); 413 } 414 415 /* destroy control device */ 416 if(sc->amr_dev_t != NULL) 417 destroy_dev(sc->amr_dev_t); 418 dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev)); 419 420 #if 0 /* XXX swildner */ 421 if (mtx_initialized(&sc->amr_hw_lock)) 422 mtx_destroy(&sc->amr_hw_lock); 423 424 if (mtx_initialized(&sc->amr_list_lock)) 425 mtx_destroy(&sc->amr_list_lock); 426 #endif 427 428 if (sc->amr_sysctl_tree != NULL) 429 sysctl_ctx_free(&sc->amr_sysctl_ctx); 430 431 lockuninit(&sc->amr_hw_lock); 432 lockuninit(&sc->amr_list_lock); 433 } 434 435 
/*******************************************************************************
 * Receive a bio structure from a child device and queue it on a particular
 * disk resource, then poke the disk resource to start as much work as it can.
 *
 * Always returns 0; the bio itself is completed asynchronously.
 */
int
amr_submit_bio(struct amr_softc *sc, struct bio *bio)
{
    debug_called(2);

    /* enqueue and kick off I/O under the list lock */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    amr_enqueue_bio(sc, bio);
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(0);
}

/********************************************************************************
 * Accept an open operation on the control device.
 */
static int
amr_open(struct dev_open_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    /*
     * NOTE(review): sc is not checked for NULL before use here — this
     * assumes a control device never outlives its softc; confirm.
     */
    sc->amr_state |= AMR_STATE_OPEN;
    return(0);
}

/********************************************************************************
 * Accept the last close on the control device.
 */
static int
amr_close(struct dev_close_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_OPEN;
    return (0);
}

/********************************************************************************
 * Handle controller-specific control operations.
 */
static void
amr_rescan_drives(struct cdev *dev)
{
    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
    int			i, error = 0;

    /* request logical-drive remap and drain outstanding commands */
    sc->amr_state |= AMR_STATE_REMAP_LD;
    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "idle controller\n");
	amr_done(sc);
    }

    /* mark ourselves as in-shutdown */
    sc->amr_state |= AMR_STATE_SHUTDOWN;

    /* flush controller */
    device_printf(sc->amr_dev, "flushing cache...");
    kprintf("%s\n", amr_flush(sc) ? "failed" : "done");

    /* delete all our child devices */
    for(i = 0 ; i < AMR_MAXLD; i++) {
	if(sc->amr_drive[i].al_disk != 0) {
	    if((error = device_delete_child(sc->amr_dev,
		sc->amr_drive[i].al_disk)) != 0)
		goto shutdown_out;

	    sc->amr_drive[i].al_disk = 0;
	}
    }

 shutdown_out:
    /* re-run startup to re-scan and re-attach the (new) drive set */
    amr_startup(sc);
}

/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transfered to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyways, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.  Force a minimum buffer size of 32k and round
 * sizes between 32k and 64k up to 64k as a workaround.
 */
static unsigned long
amr_ioctl_buffer_length(unsigned long len)
{

    /* round up per the workaround described above: minimum 32k, then 64k */
    if (len <= 32 * 1024)
	return (32 * 1024);
    if (len <= 64 * 1024)
	return (64 * 1024);
    return (len);
}

/*
 * Service a Linux-emulation megaraid ioctl.
 *
 * Decodes the Linux ioctl header, then either answers driver-version /
 * adapter-count queries (opcode 0x82), runs a SCSI passthrough command,
 * or forwards a raw mailbox command to the controller, copying data to
 * and from userland as requested.  Returns 0 or an errno.
 */
int
amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
    struct sysmsg *sm)
{
    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
    struct amr_command		*ac;
    struct amr_mailbox		*mb;
    struct amr_linux_ioctl	ali;
    void			*dp, *temp;
    int				error;
    int				adapter, len, ac_flags = 0;
    int				logical_drives_changed = 0;
    u_int32_t			linux_version = 0x02100000;
    u_int8_t			status;
    struct amr_passthrough	*ap;	/* 60 bytes */

    error = 0;
    dp = NULL;
    ac = NULL;
    ap = NULL;

    /* fetch the Linux ioctl header from userland */
    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
	return (error);
    switch (ali.ui.fcs.opcode) {
    case 0x82:
	switch(ali.ui.fcs.subopcode) {
	case 'e':
	    /* report emulated driver version */
	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
		sizeof(linux_version));
	    error = 0;
	    break;

	case 'm':
	    /* report number of adapters */
	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
		sizeof(linux_no_adapter));
	    sm->sm_result.iresult = linux_no_adapter;
	    error = 0;
	    break;

	default:
	    kprintf("Unknown subopcode\n");
	    error = ENOIOCTL;
	    break;
	}
	break;

    case 0x80:
    case 0x81:
	if (ali.ui.fcs.opcode == 0x80)
	    len = max(ali.outlen, ali.inlen);
	else
	    len = ali.ui.fcs.length;

	/* Linux encodes the adapter number XORed with 'm' << 8 */
	adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	mb = (void *)&ali.mbox[0];

	if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
	    if (sc->amr_allow_vol_config == 0) {
		error = EPERM;
		break;
	    }
	    logical_drives_changed = 1;
	}

	if (ali.mbox[0] == AMR_CMD_PASS) {
	    /* SCSI passthrough: get a command, sleeping until one is free */
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    ap = &ac->ac_ccb->ccb_pthru;

	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
		sizeof(struct amr_passthrough));
	    if (error)
		break;

	    if (ap->ap_data_transfer_length)
		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
		    M_WAITOK | M_ZERO);

	    /*
	     * NOTE(review): if ali.inlen is set while
	     * ap_data_transfer_length is 0, dp is still NULL here —
	     * confirm callers never combine those.
	     */
	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
		    dp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	    ac->ac_flags = ac_flags;

	    ac->ac_data = dp;
	    ac->ac_length = ap->ap_data_transfer_length;
	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;

	    /* run the command and wait for completion */
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    /* copy SCSI status back into the user's passthrough struct */
	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
	    if (error)
		break;

	    if (ali.outlen) {
		error = copyout(dp, temp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }
	    /* and the request-sense data */
	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
	    if (error)
		break;

	    error = 0;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
	    kprintf("No AMR_CMD_PASS_64\n");
	    error = ENOIOCTL;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
	    kprintf("No AMR_CMD_EXTPASS\n");
	    error = ENOIOCTL;
	    break;
	} else {
	    /* raw mailbox command; see amr_ioctl_buffer_length() above */
	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));

	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);

	    /*
	     * NOTE(review): this copies the rounded-up 'len' bytes from
	     * userland, which can exceed ali.inlen — verify this matches
	     * the intended Linux-compat behavior.
	     */
	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
		if (error)
		    break;
	    }

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));

	    ac->ac_length = len;
	    ac->ac_data = dp;
	    ac->ac_flags = ac_flags;

	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    /* reflect mailbox status back into the user's struct */
	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
	    if (ali.outlen) {
		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
		if (error)
		    break;
	    }

	    error = 0;
	    if (logical_drives_changed)
		amr_rescan_drives(dev);
	    break;
	}
	break;

    default:
	debug(1, "unknown linux ioctl 0x%lx", cmd);
	kprintf("unknown linux ioctl 0x%lx\n", cmd);
	error = ENOIOCTL;
	break;
    }

    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);
    return(error);
}

/*
 * Native ioctl entry point for the control device.
 *
 * Handles AMR_IO_VERSION / AMR_IO_COMMAND (and the 32-bit variant), and
 * redirects Linux-emulation ioctl numbers to amr_linux_ioctl_int().
 */
static int
amr_ioctl(struct dev_ioctl_args *ap)
{
    cdev_t			dev = ap->a_head.a_dev;
    caddr_t			addr = ap->a_data;
    u_long			cmd = ap->a_cmd;
    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
    union {
	void			*_p;
	struct amr_user_ioctl	*au;
#ifdef AMR_IO_COMMAND32
	struct amr_user_ioctl32	*au32;
#endif
	int			*result;
    } arg;
    struct amr_command		*ac;
    struct amr_mailbox_ioctl	*mbi;
    void			*dp, *au_buffer;
    unsigned long		au_length, real_length;
    unsigned char		*au_cmd;
    int				*au_statusp, au_direction;
    int				error;
    struct amr_passthrough	*_ap;	/* 60 bytes */
    int				logical_drives_changed = 0;

    debug_called(1);

    arg._p = (void *)addr;

    error = 0;
    dp = NULL;
    ac = NULL;
    _ap = NULL;

    switch(cmd) {

    case AMR_IO_VERSION:
	debug(1, "AMR_IO_VERSION");
	*arg.result = AMR_IO_VERSION_NUMBER;
	return(0);

#ifdef AMR_IO_COMMAND32
    /*
     * Accept ioctl-s from 32-bit binaries on non-32-bit
     * platforms, such as AMD. LSI's MEGAMGR utility is
     * the only example known today...			-mi
     */
    case AMR_IO_COMMAND32:
	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
	au_cmd = arg.au32->au_cmd;
	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
	au_length = arg.au32->au_length;
	au_direction = arg.au32->au_direction;
	au_statusp = &arg.au32->au_status;
	break;
#endif

    case AMR_IO_COMMAND:
	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
	au_cmd = arg.au->au_cmd;
	au_buffer = (void *)arg.au->au_buffer;
	au_length = arg.au->au_length;
	au_direction = arg.au->au_direction;
	au_statusp = &arg.au->au_status;
	break;

    case 0xc0046d00:
    case 0xc06e6d00:	/* Linux emulation */
	{
	    devclass_t			devclass;
	    struct amr_linux_ioctl	ali;
	    int				adapter, error;

	    devclass = devclass_find("amr");
	    if (devclass == NULL)
		return (ENOENT);

	    error = copyin(addr, &ali, sizeof(ali));
	    if (error)
		return (error);
	    if (ali.ui.fcs.opcode == 0x82)
		adapter = 0;
	    else
		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	    /* re-resolve the softc for the adapter named in the request */
	    sc = devclass_get_softc(devclass, adapter);
	    if (sc == NULL)
		return (ENOENT);

	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
	}
    default:
	debug(1, "unknown ioctl 0x%lx", cmd);
	return(ENOIOCTL);
    }

    /* volume create/delete requires explicit sysctl opt-in */
    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
	if (sc->amr_allow_vol_config == 0) {
	    error = EPERM;
	    goto out;
	}
	logical_drives_changed = 1;
    }

    /* handle inbound data buffer */
    real_length = amr_ioctl_buffer_length(au_length);
    if (au_length != 0 && au_cmd[0] != 0x06) {
	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
	    error = ENOMEM;
	    goto out;
	}
	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
	    kfree(dp, M_AMR);
	    return (error);
	}
	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
    }

    /* Allocate this now before the mutex gets held */

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    while ((ac = amr_alloccmd(sc)) == NULL)
	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

    /* handle SCSI passthrough command */
    if (au_cmd[0] == AMR_CMD_PASS) {
	int len;

	_ap = &ac->ac_ccb->ccb_pthru;
	bzero(_ap, sizeof(struct amr_passthrough));

	/* copy cdb */
	len = au_cmd[2];
	_ap->ap_cdb_length = len;
	bcopy(au_cmd + 3, _ap->ap_cdb, len);

	/* build passthrough */
	_ap->ap_timeout = au_cmd[len + 3] & 0x07;
	_ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
	_ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
	_ap->ap_logical_drive_no = au_cmd[len + 4];
	_ap->ap_channel = au_cmd[len + 5];
	_ap->ap_scsi_id = au_cmd[len + 6];
	_ap->ap_request_sense_length = 14;
	_ap->ap_data_transfer_length = au_length;
	/* XXX what about the request-sense area? does the caller want it? */

	/* build command */
	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	ac->ac_flags = AMR_CMD_CCB;

    } else {
	/* direct command to controller */
	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;

	/* copy pertinent mailbox items */
	mbi->mb_command = au_cmd[0];
	mbi->mb_channel = au_cmd[1];
	mbi->mb_param = au_cmd[2];
	mbi->mb_pad[0] = au_cmd[3];
	mbi->mb_drive = au_cmd[4];
	ac->ac_flags = 0;
    }

    /* build the command */
    ac->ac_data = dp;
    ac->ac_length = real_length;
    ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;

    /* run the command */
    error = amr_wait_command(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (error)
	goto out;

    /* copy out data and set status */
    if (au_length != 0) {
	error = copyout(dp, au_buffer, au_length);
    }
    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
    if (dp != NULL)
	debug(2, "%p status 0x%x", dp, ac->ac_status);
    *au_statusp = ac->ac_status;

 out:
    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);

    if (logical_drives_changed)
	amr_rescan_drives(dev);

    return(error);
}

#if 0
/********************************************************************************
 ********************************************************************************
                                                               Status Monitoring
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Perform a periodic check of the controller status
 */
static void
amr_periodic(void *data)
{
    struct amr_softc	*sc = (struct amr_softc *)data;

    debug_called(2);

    /* XXX perform periodic status checks here */

    /* compensate for missed interrupts */
    amr_done(sc);

    /* reschedule */
    callout_reset(&sc->amr_timeout, hz, amr_periodic, sc);
}
#endif

/********************************************************************************
 ********************************************************************************
                                                                Command Wrappers
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softc *sc)
{
    struct amr_enquiry3	*aex;
    struct amr_prodinfo	*ap;
    struct amr_enquiry	*ae;
    int			ldrv;
    int			status;

    /*
     * Greater than 10 byte cdb support
     */
    sc->support_ext_cdb = amr_support_ext_cdb(sc);

    if(sc->support_ext_cdb) {
	debug(2,"supports extended CDBs.");
    }

    /*
     * Try to issue an ENQUIRY3 command (40LD firmware).
     */
    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
	AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
	    sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
	    debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
		sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}
	kfree(aex, M_AMR);

	/*
	 * Get product info for channel count.
	 */
	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
	    return(1);
	}
	sc->amr_maxdrives = 40;
	sc->amr_maxchan = ap->ap_nschan;
	sc->amr_maxio = ap->ap_maxio;
	sc->amr_type |= AMR_TYPE_40LD;
	kfree(ap, M_AMR);

	/* probe for logical-drive deletion support; only status matters */
	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
	if (ap != NULL)
	    kfree(ap, M_AMR);
	if (!status) {
	    sc->amr_ld_del_supported = 1;
	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
	}
    } else {

	/* failed, try the 8LD ENQUIRY commands */
	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
		return(1);
	    }
	    ae->ae_signature = 0;
	}

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
	    sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
	    debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
		sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}

	sc->amr_maxdrives = 8;
	sc->amr_maxchan = ae->ae_adapter.aa_channels;
	sc->amr_maxio = ae->ae_adapter.aa_maxio;
	kfree(ae, M_AMR);
    }

    /*
     * Mark remaining drives as unused (0xffffffff is the sentinel tested
     * by amr_startup()).
     */
    for (; ldrv < AMR_MAXLD; ldrv++)
	sc->amr_drive[ldrv].al_size = 0xffffffff;

    /*
     * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
     * the controller's reported value, and lockups have been seen when we do.
     */
    sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);

    return(0);
}

/********************************************************************************
 * Run a generic enquiry-style command.
 *
 * Allocates a bufsize response buffer, issues (cmd, cmdsub, cmdqual) as a
 * polled command and returns the buffer on success or NULL on failure;
 * *status receives the controller status.  Caller frees the result.
 */
static void *
amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
{
    struct amr_command	*ac;
    void		*result;
    u_int8_t		*mbox;
    int			error;

    debug_called(1);

    error = 1;
    result = NULL;

    /* get ourselves a command buffer */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* allocate the response structure */
    if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
	goto out;
    /* set command flags */

    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;

    /* point the command at our data */
    ac->ac_data = result;
    ac->ac_length = bufsize;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = cmd;
    mbox[2] = cmdsub;
    mbox[3] = cmdqual;
    *status = 0;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;
    *status = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    /* on any failure, release the buffer so the caller sees NULL */
    if ((error != 0) && (result != NULL)) {
	kfree(result, M_AMR);
	result = NULL;
    }
    return(result);
}

/********************************************************************************
 * Flush the controller's internal cache, return status.
 *
 * Polled, so it is safe to call during shutdown or after a fault.
 * Returns 0 on success, nonzero (controller status or 1) on failure.
 */
int
amr_flush(struct amr_softc *sc)
{
    struct amr_command	*ac;
    int			error;

    /* get ourselves a command buffer */
    error = 1;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;

    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Detect extented cdb >> greater than 10 byte cdb support
 * returns '1' means this support exist
 * returns '0' means this support doesn't exist
 */
static int
amr_support_ext_cdb(struct amr_softc *sc)
{
    struct amr_command	*ac;
    u_int8_t		*mbox;
    int			error;

    /* get ourselves a command
buffer */ 1180 error = 0; 1181 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1182 ac = amr_alloccmd(sc); 1183 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1184 if (ac == NULL) 1185 goto out; 1186 /* set command flags */ 1187 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; 1188 1189 /* build the command proper */ 1190 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */ 1191 mbox[0] = 0xA4; 1192 mbox[2] = 0x16; 1193 1194 1195 /* we have to poll, as the system may be going down or otherwise damaged */ 1196 if (sc->amr_poll_command(ac)) 1197 goto out; 1198 if( ac->ac_status == AMR_STATUS_SUCCESS ) { 1199 error = 1; 1200 } 1201 1202 out: 1203 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1204 if (ac != NULL) 1205 amr_releasecmd(ac); 1206 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1207 return(error); 1208 } 1209 1210 /******************************************************************************** 1211 * Try to find I/O work for the controller from one or more of the work queues. 1212 * 1213 * We make the assumption that if the controller is not ready to take a command 1214 * at some given time, it will generate an interrupt at some later time when 1215 * it is. 1216 */ 1217 void 1218 amr_startio(struct amr_softc *sc) 1219 { 1220 struct amr_command *ac; 1221 1222 /* spin until something prevents us from doing any work */ 1223 for (;;) { 1224 1225 /* Don't bother to queue commands no bounce buffers are available. 
*/ 1226 if (sc->amr_state & AMR_STATE_QUEUE_FRZN) 1227 break; 1228 1229 /* try to get a ready command */ 1230 ac = amr_dequeue_ready(sc); 1231 1232 /* if that failed, build a command from a bio */ 1233 if (ac == NULL) 1234 (void)amr_bio_command(sc, &ac); 1235 1236 /* if that failed, build a command from a ccb */ 1237 if ((ac == NULL) && (sc->amr_cam_command != NULL)) 1238 sc->amr_cam_command(sc, &ac); 1239 1240 /* if we don't have anything to do, give up */ 1241 if (ac == NULL) 1242 break; 1243 1244 /* try to give the command to the controller; if this fails save it for later and give up */ 1245 if (amr_start(ac)) { 1246 debug(2, "controller busy, command deferred"); 1247 amr_requeue_ready(ac); /* XXX schedule retry very soon? */ 1248 break; 1249 } 1250 } 1251 } 1252 1253 /******************************************************************************** 1254 * Handle completion of an I/O command. 1255 */ 1256 static void 1257 amr_completeio(struct amr_command *ac) 1258 { 1259 struct amr_softc *sc = ac->ac_sc; 1260 static struct timeval lastfail; 1261 static int curfail; 1262 struct buf *bp = ac->ac_bio->bio_buf; 1263 1264 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? 
*/ 1265 bp->b_error = EIO; 1266 bp->b_flags |= B_ERROR; 1267 1268 if (ppsratecheck(&lastfail, &curfail, 1)) 1269 device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status); 1270 /* amr_printcommand(ac);*/ 1271 } 1272 amrd_intr(ac->ac_bio); 1273 lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE); 1274 amr_releasecmd(ac); 1275 lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE); 1276 } 1277 1278 /******************************************************************************** 1279 ******************************************************************************** 1280 Command Processing 1281 ******************************************************************************** 1282 ********************************************************************************/ 1283 1284 /******************************************************************************** 1285 * Convert a bio off the top of the bio queue into a command. 1286 */ 1287 static int 1288 amr_bio_command(struct amr_softc *sc, struct amr_command **acp) 1289 { 1290 struct amr_command *ac; 1291 struct amrd_softc *amrd; 1292 struct bio *bio; 1293 struct buf *bp; 1294 int error; 1295 int blkcount; 1296 int driveno; 1297 int cmd; 1298 1299 ac = NULL; 1300 error = 0; 1301 1302 /* get a command */ 1303 if ((ac = amr_alloccmd(sc)) == NULL) 1304 return (ENOMEM); 1305 1306 /* get a bio to work on */ 1307 if ((bio = amr_dequeue_bio(sc)) == NULL) { 1308 amr_releasecmd(ac); 1309 return (0); 1310 } 1311 1312 /* connect the bio to the command */ 1313 bp = bio->bio_buf; 1314 ac->ac_complete = amr_completeio; 1315 ac->ac_bio = bio; 1316 ac->ac_data = bp->b_data; 1317 ac->ac_length = bp->b_bcount; 1318 cmd = 0; 1319 switch (bp->b_cmd) { 1320 case BUF_CMD_READ: 1321 ac->ac_flags |= AMR_CMD_DATAIN; 1322 if (AMR_IS_SG64(sc)) { 1323 cmd = AMR_CMD_LREAD64; 1324 ac->ac_flags |= AMR_CMD_SG64; 1325 } else 1326 cmd = AMR_CMD_LREAD; 1327 break; 1328 case BUF_CMD_WRITE: 1329 ac->ac_flags |= AMR_CMD_DATAOUT; 1330 if (AMR_IS_SG64(sc)) { 1331 cmd = 
AMR_CMD_LWRITE64; 1332 ac->ac_flags |= AMR_CMD_SG64; 1333 } else 1334 cmd = AMR_CMD_LWRITE; 1335 break; 1336 case BUF_CMD_FLUSH: 1337 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; 1338 cmd = AMR_CMD_FLUSH; 1339 break; 1340 default: 1341 panic("Invalid bio command"); 1342 } 1343 amrd = (struct amrd_softc *)bio->bio_driver_info; 1344 driveno = amrd->amrd_drive - sc->amr_drive; 1345 blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE; 1346 1347 ac->ac_mailbox.mb_command = cmd; 1348 if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) { 1349 ac->ac_mailbox.mb_blkcount = blkcount; 1350 ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE; 1351 if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) { 1352 device_printf(sc->amr_dev, 1353 "I/O beyond end of unit (%lld,%d > %lu)\n", 1354 (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount, 1355 (u_long)sc->amr_drive[driveno].al_size); 1356 } 1357 } 1358 ac->ac_mailbox.mb_drive = driveno; 1359 if (sc->amr_state & AMR_STATE_REMAP_LD) 1360 ac->ac_mailbox.mb_drive |= 0x80; 1361 1362 /* we fill in the s/g related data when the command is mapped */ 1363 1364 1365 *acp = ac; 1366 return(error); 1367 } 1368 1369 /******************************************************************************** 1370 * Take a command, submit it to the controller and sleep until it completes 1371 * or fails. Interrupts must be enabled, returns nonzero on error. 
1372 */ 1373 static int 1374 amr_wait_command(struct amr_command *ac) 1375 { 1376 int error = 0; 1377 struct amr_softc *sc = ac->ac_sc; 1378 1379 debug_called(1); 1380 1381 ac->ac_complete = NULL; 1382 ac->ac_flags |= AMR_CMD_SLEEP; 1383 if ((error = amr_start(ac)) != 0) { 1384 return(error); 1385 } 1386 1387 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) { 1388 error = lksleep(ac,&sc->amr_list_lock, 0, "amrwcmd", 0); 1389 } 1390 1391 return(error); 1392 } 1393 1394 /******************************************************************************** 1395 * Take a command, submit it to the controller and busy-wait for it to return. 1396 * Returns nonzero on error. Can be safely called with interrupts enabled. 1397 */ 1398 static int 1399 amr_std_poll_command(struct amr_command *ac) 1400 { 1401 struct amr_softc *sc = ac->ac_sc; 1402 int error, count; 1403 1404 debug_called(2); 1405 1406 ac->ac_complete = NULL; 1407 if ((error = amr_start(ac)) != 0) 1408 return(error); 1409 1410 count = 0; 1411 do { 1412 /* 1413 * Poll for completion, although the interrupt handler may beat us to it. 1414 * Note that the timeout here is somewhat arbitrary. 
1415 */ 1416 amr_done(sc); 1417 DELAY(1000); 1418 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000)); 1419 if (!(ac->ac_flags & AMR_CMD_BUSY)) { 1420 error = 0; 1421 } else { 1422 /* XXX the slot is now marked permanently busy */ 1423 error = EIO; 1424 device_printf(sc->amr_dev, "polled command timeout\n"); 1425 } 1426 return(error); 1427 } 1428 1429 static void 1430 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err) 1431 { 1432 struct amr_command *ac = arg; 1433 struct amr_softc *sc = ac->ac_sc; 1434 int mb_channel; 1435 1436 if (err) { 1437 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); 1438 ac->ac_status = AMR_STATUS_ABORTED; 1439 return; 1440 } 1441 1442 amr_setup_sg(arg, segs, nsegs, err); 1443 1444 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */ 1445 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel; 1446 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && 1447 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) || 1448 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG))) 1449 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments; 1450 1451 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments; 1452 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr; 1453 if (AC_IS_SG64(ac)) { 1454 ac->ac_sg64_hi = 0; 1455 ac->ac_sg64_lo = ac->ac_sgbusaddr; 1456 } 1457 1458 sc->amr_poll_command1(sc, ac); 1459 } 1460 1461 /******************************************************************************** 1462 * Take a command, submit it to the controller and busy-wait for it to return. 1463 * Returns nonzero on error. Can be safely called with interrupts enabled. 
1464 */ 1465 static int 1466 amr_quartz_poll_command(struct amr_command *ac) 1467 { 1468 struct amr_softc *sc = ac->ac_sc; 1469 int error; 1470 1471 debug_called(2); 1472 1473 error = 0; 1474 1475 if (AC_IS_SG64(ac)) { 1476 ac->ac_tag = sc->amr_buffer64_dmat; 1477 ac->ac_datamap = ac->ac_dma64map; 1478 } else { 1479 ac->ac_tag = sc->amr_buffer_dmat; 1480 ac->ac_datamap = ac->ac_dmamap; 1481 } 1482 1483 /* now we have a slot, we can map the command (unmapped in amr_complete) */ 1484 if (ac->ac_data != NULL && ac->ac_length != 0) { 1485 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data, 1486 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) { 1487 error = 1; 1488 } 1489 } else { 1490 error = amr_quartz_poll_command1(sc, ac); 1491 } 1492 1493 return (error); 1494 } 1495 1496 static int 1497 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac) 1498 { 1499 int count, error; 1500 1501 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE); 1502 if ((sc->amr_state & AMR_STATE_INTEN) == 0) { 1503 count=0; 1504 while (sc->amr_busyslots) { 1505 lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz); 1506 if(count++>10) { 1507 break; 1508 } 1509 } 1510 1511 if(sc->amr_busyslots) { 1512 device_printf(sc->amr_dev, "adapter is busy\n"); 1513 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 1514 if (ac->ac_data != NULL) { 1515 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); 1516 } 1517 ac->ac_status=0; 1518 return(1); 1519 } 1520 } 1521 1522 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE); 1523 1524 /* clear the poll/ack fields in the mailbox */ 1525 sc->amr_mailbox->mb_ident = 0xFE; 1526 sc->amr_mailbox->mb_nstatus = 0xFF; 1527 sc->amr_mailbox->mb_status = 0xFF; 1528 sc->amr_mailbox->mb_poll = 0; 1529 sc->amr_mailbox->mb_ack = 0; 1530 sc->amr_mailbox->mb_busy = 1; 1531 1532 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT); 1533 1534 while(sc->amr_mailbox->mb_nstatus == 0xFF) 1535 DELAY(1); 1536 
while(sc->amr_mailbox->mb_status == 0xFF) 1537 DELAY(1); 1538 ac->ac_status=sc->amr_mailbox->mb_status; 1539 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0; 1540 while(sc->amr_mailbox->mb_poll != 0x77) 1541 DELAY(1); 1542 sc->amr_mailbox->mb_poll = 0; 1543 sc->amr_mailbox->mb_ack = 0x77; 1544 1545 /* acknowledge that we have the commands */ 1546 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK); 1547 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) 1548 DELAY(1); 1549 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 1550 1551 /* unmap the command's data buffer */ 1552 if (ac->ac_flags & AMR_CMD_DATAIN) { 1553 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD); 1554 } 1555 if (ac->ac_flags & AMR_CMD_DATAOUT) { 1556 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE); 1557 } 1558 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); 1559 1560 return(error); 1561 } 1562 1563 static __inline int 1564 amr_freeslot(struct amr_command *ac) 1565 { 1566 struct amr_softc *sc = ac->ac_sc; 1567 int slot; 1568 1569 debug_called(3); 1570 1571 slot = ac->ac_slot; 1572 if (sc->amr_busycmd[slot] == NULL) 1573 panic("amr: slot %d not busy?", slot); 1574 1575 sc->amr_busycmd[slot] = NULL; 1576 atomic_subtract_int(&sc->amr_busyslots, 1); 1577 1578 return (0); 1579 } 1580 1581 /******************************************************************************** 1582 * Map/unmap (ac)'s data in the controller's addressable space as required. 1583 * 1584 * These functions may be safely called multiple times on a given command. 
1585 */ 1586 static void 1587 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error) 1588 { 1589 struct amr_command *ac = (struct amr_command *)arg; 1590 struct amr_sgentry *sg; 1591 struct amr_sg64entry *sg64; 1592 int flags, i; 1593 1594 debug_called(3); 1595 1596 /* get base address of s/g table */ 1597 sg = ac->ac_sg.sg32; 1598 sg64 = ac->ac_sg.sg64; 1599 1600 if (AC_IS_SG64(ac)) { 1601 ac->ac_nsegments = nsegments; 1602 ac->ac_mb_physaddr = 0xffffffff; 1603 for (i = 0; i < nsegments; i++, sg64++) { 1604 sg64->sg_addr = segs[i].ds_addr; 1605 sg64->sg_count = segs[i].ds_len; 1606 } 1607 } else { 1608 /* decide whether we need to populate the s/g table */ 1609 if (nsegments < 2) { 1610 ac->ac_nsegments = 0; 1611 ac->ac_mb_physaddr = segs[0].ds_addr; 1612 } else { 1613 ac->ac_nsegments = nsegments; 1614 ac->ac_mb_physaddr = ac->ac_sgbusaddr; 1615 for (i = 0; i < nsegments; i++, sg++) { 1616 sg->sg_addr = segs[i].ds_addr; 1617 sg->sg_count = segs[i].ds_len; 1618 } 1619 } 1620 } 1621 1622 flags = 0; 1623 if (ac->ac_flags & AMR_CMD_DATAIN) 1624 flags |= BUS_DMASYNC_PREREAD; 1625 if (ac->ac_flags & AMR_CMD_DATAOUT) 1626 flags |= BUS_DMASYNC_PREWRITE; 1627 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags); 1628 ac->ac_flags |= AMR_CMD_MAPPED; 1629 } 1630 1631 static void 1632 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err) 1633 { 1634 struct amr_command *ac = arg; 1635 struct amr_softc *sc = ac->ac_sc; 1636 int mb_channel; 1637 1638 if (err) { 1639 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); 1640 amr_abort_load(ac); 1641 return; 1642 } 1643 1644 amr_setup_sg(arg, segs, nsegs, err); 1645 1646 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */ 1647 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel; 1648 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && 1649 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) || 1650 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG))) 1651 ((struct 
amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments; 1652 1653 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments; 1654 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr; 1655 if (AC_IS_SG64(ac)) { 1656 ac->ac_sg64_hi = 0; 1657 ac->ac_sg64_lo = ac->ac_sgbusaddr; 1658 } 1659 1660 if (sc->amr_submit_command(ac) == EBUSY) { 1661 amr_freeslot(ac); 1662 amr_requeue_ready(ac); 1663 } 1664 } 1665 1666 static void 1667 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) 1668 { 1669 struct amr_command *ac = arg; 1670 struct amr_softc *sc = ac->ac_sc; 1671 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru; 1672 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru; 1673 1674 if (err) { 1675 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); 1676 amr_abort_load(ac); 1677 return; 1678 } 1679 1680 /* Set up the mailbox portion of the command to point at the ccb */ 1681 ac->ac_mailbox.mb_nsgelem = 0; 1682 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr; 1683 1684 amr_setup_sg(arg, segs, nsegs, err); 1685 1686 switch (ac->ac_mailbox.mb_command) { 1687 case AMR_CMD_EXTPASS: 1688 aep->ap_no_sg_elements = ac->ac_nsegments; 1689 aep->ap_data_transfer_address = ac->ac_mb_physaddr; 1690 break; 1691 case AMR_CMD_PASS: 1692 ap->ap_no_sg_elements = ac->ac_nsegments; 1693 ap->ap_data_transfer_address = ac->ac_mb_physaddr; 1694 break; 1695 default: 1696 panic("Unknown ccb command"); 1697 } 1698 1699 if (sc->amr_submit_command(ac) == EBUSY) { 1700 amr_freeslot(ac); 1701 amr_requeue_ready(ac); 1702 } 1703 } 1704 1705 static int 1706 amr_mapcmd(struct amr_command *ac) 1707 { 1708 bus_dmamap_callback_t *cb; 1709 struct amr_softc *sc = ac->ac_sc; 1710 1711 debug_called(3); 1712 1713 if (AC_IS_SG64(ac)) { 1714 ac->ac_tag = sc->amr_buffer64_dmat; 1715 ac->ac_datamap = ac->ac_dma64map; 1716 } else { 1717 ac->ac_tag = sc->amr_buffer_dmat; 1718 ac->ac_datamap = ac->ac_dmamap; 1719 } 1720 1721 if (ac->ac_flags & AMR_CMD_CCB) 1722 cb = amr_setup_ccb; 1723 
else 1724 cb = amr_setup_data; 1725 1726 /* if the command involves data at all, and hasn't been mapped */ 1727 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) { 1728 /* map the data buffers into bus space and build the s/g list */ 1729 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data, 1730 ac->ac_length, cb, ac, 0) == EINPROGRESS) { 1731 sc->amr_state |= AMR_STATE_QUEUE_FRZN; 1732 } 1733 } else { 1734 if (sc->amr_submit_command(ac) == EBUSY) { 1735 amr_freeslot(ac); 1736 amr_requeue_ready(ac); 1737 } 1738 } 1739 1740 return (0); 1741 } 1742 1743 static void 1744 amr_unmapcmd(struct amr_command *ac) 1745 { 1746 int flag; 1747 1748 debug_called(3); 1749 1750 /* if the command involved data at all and was mapped */ 1751 if (ac->ac_flags & AMR_CMD_MAPPED) { 1752 1753 if (ac->ac_data != NULL) { 1754 1755 flag = 0; 1756 if (ac->ac_flags & AMR_CMD_DATAIN) 1757 flag |= BUS_DMASYNC_POSTREAD; 1758 if (ac->ac_flags & AMR_CMD_DATAOUT) 1759 flag |= BUS_DMASYNC_POSTWRITE; 1760 1761 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag); 1762 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); 1763 } 1764 1765 ac->ac_flags &= ~AMR_CMD_MAPPED; 1766 } 1767 } 1768 1769 static void 1770 amr_abort_load(struct amr_command *ac) 1771 { 1772 ac_qhead_t head; 1773 struct amr_softc *sc = ac->ac_sc; 1774 1775 KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0); 1776 1777 ac->ac_status = AMR_STATUS_ABORTED; 1778 amr_init_qhead(&head); 1779 amr_enqueue_completed(ac, &head); 1780 1781 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1782 amr_complete(sc, &head); 1783 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1784 } 1785 1786 /******************************************************************************** 1787 * Take a command and give it to the controller, returns 0 if successful, or 1788 * EBUSY if the command should be retried later. 
1789 */ 1790 static int 1791 amr_start(struct amr_command *ac) 1792 { 1793 struct amr_softc *sc; 1794 int error = 0; 1795 int slot; 1796 1797 debug_called(3); 1798 1799 /* mark command as busy so that polling consumer can tell */ 1800 sc = ac->ac_sc; 1801 ac->ac_flags |= AMR_CMD_BUSY; 1802 1803 /* get a command slot (freed in amr_done) */ 1804 slot = ac->ac_slot; 1805 if (sc->amr_busycmd[slot] != NULL) 1806 panic("amr: slot %d busy?", slot); 1807 sc->amr_busycmd[slot] = ac; 1808 atomic_add_int(&sc->amr_busyslots, 1); 1809 1810 /* Now we have a slot, we can map the command (unmapped in amr_complete). */ 1811 if ((error = amr_mapcmd(ac)) == ENOMEM) { 1812 /* 1813 * Memroy resources are short, so free the slot and let this be tried 1814 * later. 1815 */ 1816 amr_freeslot(ac); 1817 } 1818 1819 return (error); 1820 } 1821 1822 /******************************************************************************** 1823 * Extract one or more completed commands from the controller (sc) 1824 * 1825 * Returns nonzero if any commands on the work queue were marked as completed. 1826 */ 1827 1828 int 1829 amr_done(struct amr_softc *sc) 1830 { 1831 ac_qhead_t head; 1832 struct amr_command *ac; 1833 struct amr_mailbox mbox; 1834 int i, idx, result; 1835 1836 debug_called(3); 1837 1838 /* See if there's anything for us to do */ 1839 result = 0; 1840 amr_init_qhead(&head); 1841 1842 /* loop collecting completed commands */ 1843 for (;;) { 1844 /* poll for a completed command's identifier and status */ 1845 if (sc->amr_get_work(sc, &mbox)) { 1846 result = 1; 1847 1848 /* iterate over completed commands in this result */ 1849 for (i = 0; i < mbox.mb_nstatus; i++) { 1850 /* get pointer to busy command */ 1851 idx = mbox.mb_completed[i] - 1; 1852 ac = sc->amr_busycmd[idx]; 1853 1854 /* really a busy command? 
*/ 1855 if (ac != NULL) { 1856 1857 /* pull the command from the busy index */ 1858 amr_freeslot(ac); 1859 1860 /* save status for later use */ 1861 ac->ac_status = mbox.mb_status; 1862 amr_enqueue_completed(ac, &head); 1863 debug(3, "completed command with status %x", mbox.mb_status); 1864 } else { 1865 device_printf(sc->amr_dev, "bad slot %d completed\n", idx); 1866 } 1867 } 1868 } else 1869 break; /* no work */ 1870 } 1871 1872 /* handle completion and timeouts */ 1873 amr_complete(sc, &head); 1874 1875 return(result); 1876 } 1877 1878 /******************************************************************************** 1879 * Do completion processing on done commands on (sc) 1880 */ 1881 1882 static void 1883 amr_complete(void *context, ac_qhead_t *head) 1884 { 1885 struct amr_softc *sc = (struct amr_softc *)context; 1886 struct amr_command *ac; 1887 1888 debug_called(3); 1889 1890 /* pull completed commands off the queue */ 1891 for (;;) { 1892 ac = amr_dequeue_completed(sc, head); 1893 if (ac == NULL) 1894 break; 1895 1896 /* unmap the command's data buffer */ 1897 amr_unmapcmd(ac); 1898 1899 /* 1900 * Is there a completion handler? 1901 */ 1902 if (ac->ac_complete != NULL) { 1903 /* unbusy the command */ 1904 ac->ac_flags &= ~AMR_CMD_BUSY; 1905 ac->ac_complete(ac); 1906 1907 /* 1908 * Is someone sleeping on this one? 
1909 */ 1910 } else { 1911 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1912 ac->ac_flags &= ~AMR_CMD_BUSY; 1913 if (ac->ac_flags & AMR_CMD_SLEEP) { 1914 /* unbusy the command */ 1915 wakeup(ac); 1916 } 1917 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1918 } 1919 1920 if(!sc->amr_busyslots) { 1921 wakeup(sc); 1922 } 1923 } 1924 1925 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1926 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN; 1927 amr_startio(sc); 1928 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1929 } 1930 1931 /******************************************************************************** 1932 ******************************************************************************** 1933 Command Buffer Management 1934 ******************************************************************************** 1935 ********************************************************************************/ 1936 1937 /******************************************************************************** 1938 * Get a new command buffer. 1939 * 1940 * This may return NULL in low-memory cases. 1941 * 1942 * If possible, we recycle a command buffer that's been used before. 1943 */ 1944 struct amr_command * 1945 amr_alloccmd(struct amr_softc *sc) 1946 { 1947 struct amr_command *ac; 1948 1949 debug_called(3); 1950 1951 ac = amr_dequeue_free(sc); 1952 if (ac == NULL) { 1953 sc->amr_state |= AMR_STATE_QUEUE_FRZN; 1954 return(NULL); 1955 } 1956 1957 /* clear out significant fields */ 1958 ac->ac_status = 0; 1959 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox)); 1960 ac->ac_flags = 0; 1961 ac->ac_bio = NULL; 1962 ac->ac_data = NULL; 1963 ac->ac_complete = NULL; 1964 ac->ac_retries = 0; 1965 ac->ac_tag = NULL; 1966 ac->ac_datamap = NULL; 1967 return(ac); 1968 } 1969 1970 /******************************************************************************** 1971 * Release a command buffer for recycling. 
1972 */ 1973 void 1974 amr_releasecmd(struct amr_command *ac) 1975 { 1976 debug_called(3); 1977 1978 amr_enqueue_free(ac); 1979 } 1980 1981 /******************************************************************************** 1982 * Allocate a new command cluster and initialise it. 1983 */ 1984 static void 1985 amr_alloccmd_cluster(struct amr_softc *sc) 1986 { 1987 struct amr_command_cluster *acc; 1988 struct amr_command *ac; 1989 int i, nextslot; 1990 1991 /* 1992 * If we haven't found the real limit yet, let us have a couple of 1993 * commands in order to be able to probe. 1994 */ 1995 if (sc->amr_maxio == 0) 1996 sc->amr_maxio = 2; 1997 1998 if (sc->amr_nextslot > sc->amr_maxio) 1999 return; 2000 acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO); 2001 if (acc != NULL) { 2002 nextslot = sc->amr_nextslot; 2003 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 2004 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link); 2005 lockmgr(&sc->amr_list_lock, LK_RELEASE); 2006 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { 2007 ac = &acc->acc_command[i]; 2008 ac->ac_sc = sc; 2009 ac->ac_slot = nextslot; 2010 2011 /* 2012 * The SG table for each slot is a fixed size and is assumed to 2013 * to hold 64-bit s/g objects when the driver is configured to do 2014 * 64-bit DMA. 32-bit DMA commands still use the same table, but 2015 * cast down to 32-bit objects. 
2016 */ 2017 if (AMR_IS_SG64(sc)) { 2018 ac->ac_sgbusaddr = sc->amr_sgbusaddr + 2019 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry)); 2020 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG); 2021 } else { 2022 ac->ac_sgbusaddr = sc->amr_sgbusaddr + 2023 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry)); 2024 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); 2025 } 2026 2027 ac->ac_ccb = sc->amr_ccb + ac->ac_slot; 2028 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr + 2029 (ac->ac_slot * sizeof(union amr_ccb)); 2030 2031 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap)) 2032 break; 2033 if (AMR_IS_SG64(sc) && 2034 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map))) 2035 break; 2036 amr_releasecmd(ac); 2037 if (++nextslot > sc->amr_maxio) 2038 break; 2039 } 2040 sc->amr_nextslot = nextslot; 2041 } 2042 } 2043 2044 /******************************************************************************** 2045 * Free a command cluster 2046 */ 2047 static void 2048 amr_freecmd_cluster(struct amr_command_cluster *acc) 2049 { 2050 struct amr_softc *sc = acc->acc_command[0].ac_sc; 2051 int i; 2052 2053 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { 2054 if (acc->acc_command[i].ac_sc == NULL) 2055 break; 2056 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap); 2057 if (AMR_IS_SG64(sc)) 2058 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map); 2059 } 2060 kfree(acc, M_AMR); 2061 } 2062 2063 /******************************************************************************** 2064 ******************************************************************************** 2065 Interface-specific Shims 2066 ******************************************************************************** 2067 ********************************************************************************/ 2068 2069 /******************************************************************************** 2070 * Tell the controller that the mailbox 
 * contains a valid command
 */
static int
amr_quartz_submit_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    static struct timeval lastfail;
    static int		curfail;
    int			i = 0;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);

    /* Spin briefly waiting for the controller to release the mailbox. */
    while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
	DELAY(1);
	/* This is a no-op read that flushes pending mailbox updates */
	AMR_QGET_ODB(sc);
    }
    if (sc->amr_mailbox->mb_busy) {
	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	/*
	 * Mailbox still owned by the controller: fail with EBUSY so the
	 * caller retries, and complain (rate-limited) if this command has
	 * been bounced an implausible number of times.
	 */
	if (ac->ac_retries++ > 1000) {
	    if (ppsratecheck(&lastfail, &curfail, 1))
		device_printf(sc->amr_dev, "Too many retries on command %p. "
			      "Controller is likely dead\n", ac);
	    ac->ac_retries = 0;
	}
	return (EBUSY);
    }

    /*
     * Save the slot number so that we can locate this command when complete.
     * Note that ident = 0 seems to be special, so we don't use it.
     */
    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;	/* will be copied into mbox */
    /*
     * Copy only the command portion of the mailbox (14 bytes); the
     * busy/poll/ack handshake bytes are initialised explicitly below.
     */
    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
    sc->amr_mailbox->mb_busy = 1;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;
    /* 64-bit scatter/gather base for this command (Quartz interface only) */
    sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
    sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;

    /* ring the inbound doorbell to hand the mailbox to the controller */
    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
    return(0);
}

/*
 * Standard-interface version of the above: submit a command, or return
 * EBUSY if the controller still owns the hardware mailbox.
 */
static int
amr_std_submit_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    static struct timeval lastfail;
    static int		curfail;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
	lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	/* same rate-limited retry accounting as the Quartz path */
	if (ac->ac_retries++ > 1000) {
	    if (ppsratecheck(&lastfail, &curfail, 1))
		device_printf(sc->amr_dev, "Too many retries on command %p. "
			      "Controller is likely dead\n", ac);
	    ac->ac_retries = 0;
	}
	return (EBUSY);
    }

    /*
     * Save the slot number so that we can locate this command when complete.
     * Note that ident = 0 seems to be special, so we don't use it.
     */
    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;	/* will be copied into mbox */
    /* command portion only; handshake bytes set explicitly below */
    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
    sc->amr_mailbox->mb_busy = 1;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;

    AMR_SPOST_COMMAND(sc);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
    return(0);
}

/********************************************************************************
 * Claim any work that the controller has completed; acknowledge completion,
 * save details of the completion in (mbsave)
 */
static int
amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
{
    int		worked, i;
    u_int32_t	outd;
    u_int8_t	nstatus;
    u_int8_t	completed[46];

    debug_called(3);

    worked = 0;

    /* work waiting for us? */
    if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {

	/* acknowledge interrupt */
	AMR_QPUT_ODB(sc, AMR_QODB_READY);

	/*
	 * 0xff in mb_nstatus means the firmware has not yet written the
	 * completion count; poll until it appears, then reset the field
	 * to 0xff to rearm it for the next batch.
	 */
	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
	    DELAY(1);
	sc->amr_mailbox->mb_nstatus = 0xff;

	/* wait until fw wrote out all completions */
	for (i = 0; i < nstatus; i++) {
	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
		DELAY(1);
	    sc->amr_mailbox->mb_completed[i] = 0xff;
	}

	/* Save information for later processing */
	mbsave->mb_nstatus = nstatus;
	mbsave->mb_status = sc->amr_mailbox->mb_status;
	sc->amr_mailbox->mb_status = 0xff;

	for (i = 0; i < nstatus; i++)
	    mbsave->mb_completed[i] = completed[i];

	/* acknowledge that we have the commands */
	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);

#if 0
#ifndef AMR_QUARTZ_GOFASTER
	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
	    ;				/* XXX aiee! what if it dies? */
#endif
#endif

	worked = 1;			/* got some work */
    }

    return(worked);
}

/*
 * Standard-interface version of amr_quartz_get_work(): snapshot the whole
 * mailbox (which contains the list of completed commands) into (mbsave)
 * and acknowledge the interrupt.
 */
static int
amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
{
    int		worked;
    u_int8_t	istat;

    debug_called(3);

    worked = 0;

    /* check for valid interrupt status */
    istat = AMR_SGET_ISTAT(sc);
    if ((istat & AMR_SINTR_VALID) != 0) {
	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */

	/* save mailbox, which contains a list of completed commands */
	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));

	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
	worked = 1;
    }

    return(worked);
}

/********************************************************************************
 * Notify the controller of the mailbox location.
 */
static void
amr_std_attach_mailbox(struct amr_softc *sc)
{

    /* program the mailbox physical address, one byte at a time */
    AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);

    /* clear any outstanding interrupt and enable interrupts proper */
    AMR_SACK_INTERRUPT(sc);
    AMR_SENABLE_INTR(sc);
}

#ifdef AMR_BOARD_INIT
/********************************************************************************
 * Initialise the controller
 */
static int
amr_quartz_init(struct amr_softc *sc)
{
    int		status, ostatus;

    device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));

    AMR_QRESET(sc);

    /*
     * Poll the init-status register until the firmware reports done,
     * printing each distinct intermediate state once.
     * NOTE(review): there is no timeout here — a controller that never
     * reaches AMR_QINIT_DONE (or NOMEM) would hang this loop.
     */
    ostatus = 0xff;
    while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
	if (status != ostatus) {
	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
	    ostatus = status;
	}
	switch (status) {
	case AMR_QINIT_NOMEM:
	    return(ENOMEM);

	case AMR_QINIT_SCAN:
	    /* XXX we could print channel/target here */
	    break;
	}
    }
    return(0);
}

static int
amr_std_init(struct amr_softc *sc)
{
    int		status, ostatus;

    device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));

    AMR_SRESET(sc);

    /* same poll-until-done loop as amr_quartz_init(), std register set */
    ostatus = 0xff;
    while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
	if (status != ostatus) {
	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
	    ostatus = status;
	}
	switch (status) {
	case AMR_SINIT_NOMEM:
	    return(ENOMEM);

	case AMR_SINIT_INPROG:
	    /* XXX we could print channel/target here? */
	    break;
	}
    }
    return(0);
}
#endif

/********************************************************************************
 ********************************************************************************
                                                                      Debugging
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Identify the controller and print some information about it.
 */
static void
amr_describe_controller(struct amr_softc *sc)
{
    struct amr_prodinfo	*ap;
    struct amr_enquiry	*ae;
    char		*prod;
    int			status;

    /*
     * Try to get 40LD product info, which tells us what the card is labelled as.
     */
    if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
		      ap->ap_memsize);

	kfree(ap, M_AMR);
	return;
    }

    /*
     * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
     */
    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);

    } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {

	/*
	 * Try to work it out based on the PCI signatures.
	 */
	switch (pci_get_device(sc->amr_dev)) {
	case 0x9010:
	    prod = "Series 428";
	    break;
	case 0x9060:
	    prod = "Series 434";
	    break;
	default:
	    prod = "unknown controller";
	    break;
	}
    } else {
	device_printf(sc->amr_dev, "<unsupported controller>\n");
	return;
    }

    /*
     * HP NetRaid controllers have a special encoding of the firmware and
     * BIOS versions.  The AMI version seems to have it as strings whereas
     * the HP version does it with a leading uppercase character and two
     * binary numbers.
     */
    if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
       ae->ae_adapter.aa_firmware[2] <= 'Z' &&
       ae->ae_adapter.aa_firmware[1] <  ' ' &&
       ae->ae_adapter.aa_firmware[0] <  ' ' &&
       ae->ae_adapter.aa_bios[2] >= 'A'     &&
       ae->ae_adapter.aa_bios[2] <= 'Z'     &&
       ae->ae_adapter.aa_bios[1] <  ' '     &&
       ae->ae_adapter.aa_bios[0] <  ' ') {

	/* this looks like we have an HP NetRaid version of the MegaRaid */

    	if(ae->ae_signature == AMR_SIG_438) {
    		/* the AMI 438 is a NetRaid 3si in HP-land */
    		prod = "HP NetRaid 3si";
    	}

	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
		      prod, ae->ae_adapter.aa_firmware[2],
		      ae->ae_adapter.aa_firmware[1],
		      ae->ae_adapter.aa_firmware[0],
		      ae->ae_adapter.aa_bios[2],
		      ae->ae_adapter.aa_bios[1],
		      ae->ae_adapter.aa_bios[0],
		      ae->ae_adapter.aa_memorysize);
    } else {
	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
		      ae->ae_adapter.aa_memorysize);
    }
    kfree(ae, M_AMR);
}

/*
 * Write (blks) blocks from (data) to logical drive (unit) starting at (lba)
 * using a polled, high-priority command.  The "can't assume interrupts"
 * comment below suggests this is the crash-dump path — TODO confirm against
 * the caller.  Returns 0 on success, the command status, or EIO.
 */
int
amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
{
    struct amr_command	*ac;
    int			error = EIO;

    debug_called(1);

    sc->amr_state |= AMR_STATE_INTEN;

    /* get ourselves a command buffer */
    if ((ac = amr_alloccmd(sc)) == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* point the command at our data */
    ac->ac_data = data;
    ac->ac_length = blks * AMR_BLKSIZE;

    /* build the command proper */
    ac->ac_mailbox.mb_command	= AMR_CMD_LWRITE;
    ac->ac_mailbox.mb_blkcount	= blks;
    ac->ac_mailbox.mb_lba	= lba;
    ac->ac_mailbox.mb_drive	= unit;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;

 out:
    if (ac != NULL)
	amr_releasecmd(ac);

    sc->amr_state &= ~AMR_STATE_INTEN;
    return (error);
}



#ifdef AMR_DEBUG
/********************************************************************************
 * Print the command (ac) in human-readable format
 */
#if 0
static void
amr_printcommand(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    struct amr_sgentry	*sg;
    int			i;

    device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
    device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
    device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
    device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
    device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);

    /* get base address of s/g table */
    sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
    for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
}
#endif
#endif