1 /* $NetBSD: amr.c,v 1.1 2002/01/30 14:35:45 ad Exp $ */ 2 3 /*- 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /*- 40 * Copyright (c) 1999,2000 Michael Smith 41 * Copyright (c) 2000 BSDi 42 * All rights reserved. 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice, this list of conditions and the following disclaimer. 49 * 2. Redistributions in binary form must reproduce the above copyright 50 * notice, this list of conditions and the following disclaimer in the 51 * documentation and/or other materials provided with the distribution. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 
 *
 * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
 * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
 */

/*
 * Driver for AMI RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.1 2002/01/30 14:35:45 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/amrreg.h>
#include <dev/pci/amrvar.h>

/* Each command's S/G list is sized for 32 entries (see AMR_SGL_SIZE). */
#if AMR_MAX_SEGS > 32
#error AMR_MAX_SEGS too high
#endif

/* Size of the buffer used for enquiry-style commands. */
#define	AMR_ENQUIRY_BUFSIZE	2048
/* Per-command scatter/gather list size. */
#define	AMR_SGL_SIZE		(sizeof(struct amr_sgentry) * 32)

/* Autoconfiguration glue. */
void	amr_attach(struct device *, struct device *, void *);
void	*amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t);
int	amr_init(struct amr_softc *, const char *,
		 struct pci_attach_args *pa);
int	amr_intr(void *);
int	amr_match(struct device *, struct cfdata *, void *);
int	amr_print(void *, const char *);
void	amr_shutdown(void *);
int	amr_submatch(struct device *, struct cfdata *, void *);

/* Mailbox protocol, with `quartz' and `standard' model-specific variants. */
int	amr_mbox_wait(struct amr_softc *);
int	amr_quartz_get_work(struct amr_softc *, struct amr_mailbox *);
int	amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
int	amr_std_get_work(struct amr_softc *, struct amr_mailbox *);
int	amr_std_submit(struct amr_softc *, struct amr_ccb *);

/* Register access helpers; see definitions below for barrier semantics. */
static inline u_int8_t	amr_inb(struct amr_softc *, int);
static inline u_int32_t	amr_inl(struct amr_softc *, int);
static inline void	amr_outb(struct amr_softc *, int, u_int8_t);
static inline void	amr_outl(struct amr_softc *, int, u_int32_t);

struct cfattach amr_ca = {
	sizeof(struct amr_softc), amr_match,
	amr_attach
};

#define	AT_QUARTZ	0x01	/* `Quartz' chipset */
#define	AT_SIG		0x02	/* Check for signature */

/* Table of supported PCI vendor/product pairs and their model flags. */
struct amr_pci_type {
	u_short	apt_vendor;
	u_short	apt_product;
	u_short	apt_flags;
} static const amr_pci_type[] = {
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID,  0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID2, 0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG }
};

/* Map of ENQUIRY2 signatures to human-readable product names. */
struct amr_typestr {
	const char	*at_str;
	int		at_sig;
} static const amr_typestr[] = {
	{ "Series 431",	AMR_SIG_431 },
	{ "Series 438",	AMR_SIG_438 },
	{ "Series 466",	AMR_SIG_466 },
	{ "Series 467",	AMR_SIG_467 },
	{ "Series 490",	AMR_SIG_490 },
	{ "Series 762",	AMR_SIG_762 },
	{ "HP NetRAID (T5)",	AMR_SIG_T5 },
	{ "HP NetRAID (T7)",	AMR_SIG_T7 },
};

/* Single shutdown hook shared by all attached controllers. */
static void	*amr_sdh;

/*
 * Read one byte from a controller register, with a full read/write
 * barrier issued first.
 */
static inline u_int8_t
amr_inb(struct amr_softc *amr, int off)
{

	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
}

/*
 * Read one 32-bit word from a controller register, with a full
 * read/write barrier issued first.
 */
static inline u_int32_t
amr_inl(struct amr_softc *amr, int off)
{

	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
}

/*
 * Write one byte to a controller register, followed by a write barrier.
 */
static inline void
amr_outb(struct amr_softc *amr, int off, u_int8_t val)
{

	bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Write one 32-bit word to a controller register, followed by a write
 * barrier.
 */
static inline void
amr_outl(struct amr_softc *amr, int off, u_int32_t val)
{

	bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 *
 * Match a supported device.  Returns non-zero on a positive match.
 */
int
amr_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa;
	pcireg_t s;
	int i;

	pa = (struct pci_attach_args *)aux;

	/*
	 * Don't match the device if it's operating in I2O mode.  In this
	 * case it should be handled by the `iop' driver.
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (0);

	/* Look for a vendor/product match in the supported-device table. */
	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
			break;

	if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
		return (0);

	if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
		return (1);

	/*
	 * This entry requires a signature check: read the quartz signature
	 * register from PCI config space and accept either known value.
	 */
	s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
	return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
}

/*
 * Attach a supported device.  XXX This doesn't fail gracefully, and may
 * over-allocate resources.
230 */ 231 void 232 amr_attach(struct device *parent, struct device *self, void *aux) 233 { 234 bus_space_tag_t memt, iot; 235 bus_space_handle_t memh, ioh; 236 struct pci_attach_args *pa; 237 struct amr_attach_args amra; 238 const struct amr_pci_type *apt; 239 struct amr_softc *amr; 240 pci_chipset_tag_t pc; 241 pci_intr_handle_t ih; 242 const char *intrstr; 243 pcireg_t reg; 244 int rseg, i, size, rv, memreg, ioreg; 245 bus_dma_segment_t seg; 246 struct amr_ccb *ac; 247 248 amr = (struct amr_softc *)self; 249 pa = (struct pci_attach_args *)aux; 250 pc = pa->pa_pc; 251 252 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++) 253 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor && 254 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product) 255 break; 256 apt = amr_pci_type + i; 257 258 memreg = ioreg = 0; 259 for (i = 0x10; i <= 0x14; i += 4) { 260 reg = pci_conf_read(pc, pa->pa_tag, i); 261 switch (PCI_MAPREG_TYPE(reg)) { 262 case PCI_MAPREG_TYPE_MEM: 263 if (PCI_MAPREG_MEM_SIZE(reg) != 0) 264 memreg = i; 265 break; 266 case PCI_MAPREG_TYPE_IO: 267 if (PCI_MAPREG_IO_SIZE(reg) != 0) 268 ioreg = i; 269 break; 270 } 271 } 272 273 if (memreg != 0) 274 if (pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0, 275 &memt, &memh, NULL, NULL)) 276 memreg = 0; 277 if (ioreg != 0) 278 if (pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0, 279 &iot, &ioh, NULL, NULL)) 280 ioreg = 0; 281 282 if (memreg) { 283 amr->amr_iot = memt; 284 amr->amr_ioh = memh; 285 } else if (ioreg) { 286 amr->amr_iot = iot; 287 amr->amr_ioh = ioh; 288 } else { 289 printf("can't map control registers\n"); 290 return; 291 } 292 293 amr->amr_dmat = pa->pa_dmat; 294 295 /* Enable the device. */ 296 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 297 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 298 reg | PCI_COMMAND_MASTER_ENABLE); 299 300 /* Map and establish the interrupt. 
*/ 301 if (pci_intr_map(pa, &ih)) { 302 printf("can't map interrupt\n"); 303 return; 304 } 305 intrstr = pci_intr_string(pc, ih); 306 amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr); 307 if (amr->amr_ih == NULL) { 308 printf("can't establish interrupt"); 309 if (intrstr != NULL) 310 printf(" at %s", intrstr); 311 printf("\n"); 312 return; 313 } 314 315 /* 316 * Allocate space for the mailbox and S/G lists. Some controllers 317 * don't like S/G lists to be located below 0x2000, so we allocate 318 * enough slop to enable us to compensate. 319 * 320 * The standard mailbox structure needs to be aligned on a 16-byte 321 * boundary. The 64-bit mailbox has one extra field, 4 bytes in 322 * size, which preceeds the standard mailbox. 323 */ 324 size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000; 325 326 if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, NULL, &seg, 327 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 328 printf("%s: unable to allocate buffer, rv = %d\n", 329 amr->amr_dv.dv_xname, rv); 330 return; 331 } 332 333 if ((rv = bus_dmamem_map(amr->amr_dmat, &seg, rseg, size, 334 (caddr_t *)&amr->amr_mbox, 335 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 336 printf("%s: unable to map buffer, rv = %d\n", 337 amr->amr_dv.dv_xname, rv); 338 return; 339 } 340 341 if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0, 342 BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) { 343 printf("%s: unable to create buffer DMA map, rv = %d\n", 344 amr->amr_dv.dv_xname, rv); 345 return; 346 } 347 348 if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap, 349 amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) { 350 printf("%s: unable to load buffer DMA map, rv = %d\n", 351 amr->amr_dv.dv_xname, rv); 352 return; 353 } 354 355 memset(amr->amr_mbox, 0, size); 356 357 amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr + 16; 358 amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff; 359 amr->amr_sgls = (struct amr_sgentry *)((caddr_t)amr->amr_mbox + 360 
amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr); 361 amr->amr_mbox = (struct amr_mailbox *)((caddr_t)amr->amr_mbox + 16); 362 363 /* 364 * Allocate and initalise the command control blocks. 365 */ 366 ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO); 367 amr->amr_ccbs = ac; 368 SLIST_INIT(&amr->amr_ccb_freelist); 369 370 for (i = 0; i < AMR_MAX_CMDS; i++, ac++) { 371 rv = bus_dmamap_create(amr->amr_dmat, AMR_MAX_XFER, 372 AMR_MAX_SEGS, AMR_MAX_XFER, 0, 373 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 374 &ac->ac_xfer_map); 375 if (rv != 0) 376 break; 377 378 ac->ac_ident = i; 379 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist); 380 } 381 if (i != AMR_MAX_CMDS) 382 printf("%s: %d/%d CCBs created\n", amr->amr_dv.dv_xname, 383 i, AMR_MAX_CMDS); 384 385 /* 386 * Take care of model-specific tasks. 387 */ 388 if ((apt->apt_flags & AT_QUARTZ) != 0) { 389 amr->amr_submit = amr_quartz_submit; 390 amr->amr_get_work = amr_quartz_get_work; 391 } else { 392 amr->amr_submit = amr_std_submit; 393 amr->amr_get_work = amr_std_get_work; 394 395 /* Notify the controller of the mailbox location. */ 396 amr_outl(amr, AMR_SREG_MBOX, amr->amr_mbox_paddr); 397 amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR); 398 399 /* Clear outstanding interrupts and enable interrupts. */ 400 amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR); 401 amr_outb(amr, AMR_SREG_TOGL, 402 amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE); 403 } 404 405 /* 406 * Retrieve parameters, and tell the world about us. 407 */ 408 amr->amr_maxqueuecnt = i; 409 printf(": AMI RAID "); 410 if (amr_init(amr, intrstr, pa) != 0) 411 return; 412 413 /* 414 * Cap the maximum number of outstanding commands. AMI's Linux 415 * driver doesn't trust the controller's reported value, and lockups 416 * have been seen when we do. 
417 */ 418 amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS); 419 if (amr->amr_maxqueuecnt > i) 420 amr->amr_maxqueuecnt = i; 421 422 /* Set our `shutdownhook' before we start any device activity. */ 423 if (amr_sdh == NULL) 424 amr_sdh = shutdownhook_establish(amr_shutdown, NULL); 425 426 /* Attach sub-devices. */ 427 for (i = 0; i < amr->amr_numdrives; i++) { 428 if (amr->amr_drive[i].al_size == 0) 429 continue; 430 amra.amra_unit = i; 431 config_found_sm(&amr->amr_dv, &amra, amr_print, amr_submatch); 432 } 433 434 SIMPLEQ_INIT(&amr->amr_ccb_queue); 435 } 436 437 /* 438 * Print autoconfiguration message for a sub-device. 439 */ 440 int 441 amr_print(void *aux, const char *pnp) 442 { 443 struct amr_attach_args *amra; 444 445 amra = (struct amr_attach_args *)aux; 446 447 if (pnp != NULL) 448 printf("block device at %s", pnp); 449 printf(" unit %d", amra->amra_unit); 450 return (UNCONF); 451 } 452 453 /* 454 * Match a sub-device. 455 */ 456 int 457 amr_submatch(struct device *parent, struct cfdata *cf, void *aux) 458 { 459 struct amr_attach_args *amra; 460 461 amra = (struct amr_attach_args *)aux; 462 463 if (cf->amracf_unit != AMRCF_UNIT_DEFAULT && 464 cf->amracf_unit != amra->amra_unit) 465 return (0); 466 467 return ((*cf->cf_attach->ca_match)(parent, cf, aux)); 468 } 469 470 /* 471 * Retrieve operational parameters and describe the controller. 472 */ 473 int 474 amr_init(struct amr_softc *amr, const char *intrstr, 475 struct pci_attach_args *pa) 476 { 477 struct amr_prodinfo *ap; 478 struct amr_enquiry *ae; 479 struct amr_enquiry3 *aex; 480 const char *prodstr; 481 u_int i, sig; 482 char buf[64]; 483 484 /* 485 * Try to get 40LD product info, which tells us what the card is 486 * labelled as. 
487 */ 488 ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0); 489 if (ap != NULL) { 490 printf("<%.80s>\n", ap->ap_product); 491 if (intrstr != NULL) 492 printf("%s: interrupting at %s\n", 493 amr->amr_dv.dv_xname, intrstr); 494 printf("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n", 495 amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios, 496 le16toh(ap->ap_memsize)); 497 498 amr->amr_maxqueuecnt = ap->ap_maxio; 499 free(ap, M_DEVBUF); 500 501 /* 502 * Fetch and record state of logical drives. 503 */ 504 aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3, 505 AMR_CONFIG_ENQ3_SOLICITED_FULL); 506 if (aex == NULL) { 507 printf("%s ENQUIRY3 failed\n", amr->amr_dv.dv_xname); 508 return (-1); 509 } 510 511 if (aex->ae_numldrives > AMR_MAX_UNITS) { 512 printf("%s: adjust AMR_MAX_UNITS to %d (currently %d)" 513 "\n", amr->amr_dv.dv_xname, 514 ae->ae_ldrv.al_numdrives, AMR_MAX_UNITS); 515 amr->amr_numdrives = AMR_MAX_UNITS; 516 } else 517 amr->amr_numdrives = aex->ae_numldrives; 518 519 for (i = 0; i < amr->amr_numdrives; i++) { 520 amr->amr_drive[i].al_size = 521 le32toh(aex->ae_drivesize[i]); 522 amr->amr_drive[i].al_state = aex->ae_drivestate[i]; 523 amr->amr_drive[i].al_properties = aex->ae_driveprop[i]; 524 } 525 526 free(aex, M_DEVBUF); 527 return (0); 528 } 529 530 /* 531 * Try 8LD extended ENQUIRY to get the controller signature. Once 532 * found, search for a product description. 
	 */
	if ((ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0)) != NULL) {
		i = 0;
		sig = le32toh(ae->ae_signature);

		/* Look the signature up in the product-name table. */
		while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
			if (amr_typestr[i].at_sig == sig)
				break;
			i++;
		}
		if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
			sprintf(buf, "unknown ENQUIRY2 sig (0x%08x)", sig);
			prodstr = buf;
		} else
			prodstr = amr_typestr[i].at_str;
	} else {
		/* Fall back to plain ENQUIRY; give up if that fails too. */
		if ((ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0)) == NULL) {
			printf("%s: unsupported controller\n",
			    amr->amr_dv.dv_xname);
			return (-1);
		}

		/* Identify the board by PCI product ID instead. */
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_AMI_MEGARAID:
			prodstr = "Series 428";
			break;
		case PCI_PRODUCT_AMI_MEGARAID2:
			prodstr = "Series 434";
			break;
		default:
			sprintf(buf, "unknown PCI dev (0x%04x)",
			    PCI_PRODUCT(pa->pa_id));
			prodstr = buf;
			break;
		}
	}

	printf("<%s>\n", prodstr);
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", amr->amr_dv.dv_xname,
		    intrstr);
	/*
	 * NOTE(review): aa_memorysize is printed without a le*toh()
	 * conversion, unlike ap_memsize above -- confirm whether the
	 * adapter struct is already host-endian here.
	 */
	printf("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
	    amr->amr_dv.dv_xname, ae->ae_adapter.aa_firmware,
	    ae->ae_adapter.aa_bios, ae->ae_adapter.aa_memorysize);

	amr->amr_maxqueuecnt = ae->ae_adapter.aa_maxio;

	/*
	 * Record state of logical drives.
	 */
	if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
		printf("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
		    amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives,
		    AMR_MAX_UNITS);
		amr->amr_numdrives = AMR_MAX_UNITS;
	} else
		amr->amr_numdrives = ae->ae_ldrv.al_numdrives;

	for (i = 0; i < AMR_MAX_UNITS; i++) {
		amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
		amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
		amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
	}

	free(ae, M_DEVBUF);
	return (0);
}

/*
 * Flush the internal cache on each configured controller.  Called at
 * shutdown time.
 */
void
amr_shutdown(void *cookie)
{
	extern struct cfdriver amr_cd;
	struct amr_softc *amr;
	struct amr_ccb *ac;
	int i, rv;

	for (i = 0; i < amr_cd.cd_ndevs; i++) {
		if ((amr = device_lookup(&amr_cd, i)) == NULL)
			continue;

		/* Issue a FLUSH command and poll up to 30s for completion. */
		if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
			ac->ac_mbox.mb_command = AMR_CMD_FLUSH;
			rv = amr_ccb_poll(amr, ac, 30000);
			amr_ccb_free(amr, ac);
		}
		if (rv != 0)
			printf("%s: unable to flush cache (%d)\n",
			    amr->amr_dv.dv_xname, rv);
	}
}

/*
 * Interrupt service routine.  Returns non-zero if the interrupt was
 * for us.
 */
int
amr_intr(void *cookie)
{
	struct amr_softc *amr;
	struct amr_ccb *ac;
	struct amr_mailbox mbox;
	u_int i, forus, idx;

	amr = cookie;
	forus = 0;

	/* Drain all completion notifications the controller has for us. */
	while ((*amr->amr_get_work)(amr, &mbox) == 0) {
		/* Iterate over completed commands in this result.
*/ 644 for (i = 0; i < mbox.mb_nstatus; i++) { 645 idx = mbox.mb_completed[i] - 1; 646 ac = amr->amr_ccbs + idx; 647 648 if (idx >= amr->amr_maxqueuecnt) { 649 printf("%s: bad status (bogus ID: %u=%u)\n", 650 amr->amr_dv.dv_xname, i, idx); 651 continue; 652 } 653 654 if ((ac->ac_flags & AC_ACTIVE) == 0) { 655 printf("%s: bad status (not active; 0x04%x)\n", 656 amr->amr_dv.dv_xname, ac->ac_flags); 657 continue; 658 } 659 660 ac->ac_status = mbox.mb_status; 661 ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) | 662 AC_COMPLETE; 663 664 /* Pass notification to upper layers. */ 665 if (ac->ac_handler != NULL) 666 (*ac->ac_handler)(ac); 667 } 668 forus = 1; 669 } 670 671 if (forus) 672 amr_ccb_enqueue(amr, NULL); 673 return (forus); 674 } 675 676 /* 677 * Run a generic enquiry-style command. 678 */ 679 void * 680 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub, 681 u_int8_t cmdqual) 682 { 683 struct amr_ccb *ac; 684 u_int8_t *mb; 685 void *buf; 686 int rv; 687 688 if (amr_ccb_alloc(amr, &ac) != 0) 689 return (NULL); 690 buf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT); 691 692 /* Build the command proper. */ 693 mb = (u_int8_t *)&ac->ac_mbox; 694 mb[0] = cmd; 695 mb[2] = cmdsub; 696 mb[3] = cmdqual; 697 698 if ((rv = amr_ccb_map(amr, ac, buf, AMR_ENQUIRY_BUFSIZE, 0)) == 0) { 699 rv = amr_ccb_poll(amr, ac, 2000); 700 amr_ccb_unmap(amr, ac); 701 } 702 703 amr_ccb_free(amr, ac); 704 705 if (rv != 0) { 706 free(buf, M_DEVBUF); 707 buf = NULL; 708 } 709 710 return (buf); 711 } 712 713 /* 714 * Allocate and initialise a CCB. 
 */
int
amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
{
	struct amr_ccb *ac;
	struct amr_mailbox *mb;
	int s;

	/* Pull a CCB off the free list; fail with EAGAIN if it's empty. */
	s = splbio();
	if ((ac = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
		splx(s);
		return (EAGAIN);
	}
	SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
	splx(s);

	ac->ac_handler = NULL;
	mb = &ac->ac_mbox;
	*acp = ac;

	memset(mb, 0, sizeof(*mb));

	/* Identifier 0 is reserved; the controller sees ident + 1. */
	mb->mb_ident = ac->ac_ident + 1;
	mb->mb_busy = 1;
	mb->mb_poll = 0;
	mb->mb_ack = 0;

	return (0);
}

/*
 * Free a CCB, returning it to the free list.
 */
void
amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
{
	int s;

	ac->ac_flags = 0;

	s = splbio();
	SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
	splx(s);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
{
	int s;

	s = splbio();

	if (ac != NULL)
		SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);

	/* Stop at the first CCB the controller won't accept (mailbox busy). */
	while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
		if ((*amr->amr_submit)(amr, ac) != 0)
			break;
		SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
	}

	splx(s);
}

/*
 * Map the specified CCB's data buffer onto the bus, and fill the
 * scatter-gather list.
787 */ 788 int 789 amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size, 790 int out) 791 { 792 struct amr_sgentry *sge; 793 struct amr_mailbox *mb; 794 int nsegs, i, rv, sgloff; 795 bus_dmamap_t xfer; 796 797 xfer = ac->ac_xfer_map; 798 799 rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL, 800 BUS_DMA_NOWAIT); 801 if (rv != 0) 802 return (rv); 803 804 mb = &ac->ac_mbox; 805 ac->ac_xfer_size = size; 806 ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN); 807 sgloff = AMR_SGL_SIZE * ac->ac_ident; 808 809 /* We don't need to use a scatter/gather list for just 1 segment. */ 810 nsegs = xfer->dm_nsegs; 811 if (nsegs == 1) { 812 mb->mb_nsgelem = 0; 813 mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr); 814 ac->ac_flags |= AC_NOSGL; 815 } else { 816 mb->mb_nsgelem = nsegs; 817 mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff); 818 819 sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff); 820 for (i = 0; i < nsegs; i++, sge++) { 821 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr); 822 sge->sge_count = htole32(xfer->dm_segs[i].ds_len); 823 } 824 } 825 826 bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size, 827 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 828 829 if ((ac->ac_flags & AC_NOSGL) == 0) 830 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff, 831 AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE); 832 833 return (0); 834 } 835 836 /* 837 * Unmap the specified CCB's data buffer. 838 */ 839 void 840 amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac) 841 { 842 843 if ((ac->ac_flags & AC_NOSGL) == 0) 844 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 845 AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE, 846 BUS_DMASYNC_POSTWRITE); 847 bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size, 848 (ac->ac_flags & AC_XFER_IN) != 0 ? 
849 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 850 bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map); 851 } 852 853 /* 854 * Submit a command to the controller and poll on completion. Return 855 * non-zero on timeout or error. Must be called with interrupts blocked. 856 */ 857 int 858 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo) 859 { 860 int rv; 861 862 if ((rv = (*amr->amr_submit)(amr, ac)) != 0) 863 return (rv); 864 865 for (timo *= 10; timo != 0; timo--) { 866 amr_intr(amr); 867 if ((ac->ac_flags & AC_COMPLETE) != 0) 868 break; 869 DELAY(100); 870 } 871 872 return (timo == 0 || ac->ac_status != 0 ? EIO : 0); 873 } 874 875 /* 876 * Wait for the mailbox to become available. 877 */ 878 int 879 amr_mbox_wait(struct amr_softc *amr) 880 { 881 int timo; 882 883 for (timo = 10000; timo != 0; timo--) { 884 if (amr->amr_mbox->mb_busy == 0) 885 break; 886 DELAY(100); 887 } 888 889 #if 0 890 if (timo != 0) 891 printf("%s: controller wedged\n", amr->amr_dv.dv_xname); 892 #endif 893 894 return (timo != 0 ? 0 : EIO); 895 } 896 897 /* 898 * Tell the controller that the mailbox contains a valid command. Must be 899 * called with interrupts blocked. 
 */
int
amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
{
	u_int32_t v;

	/* Busy if a submit or ack is still pending in the inbound doorbell. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & (AMR_QIDB_SUBMIT | AMR_QIDB_ACK)) != 0)
		return (EBUSY);

	/* Copy the command into the shared mailbox and ring the doorbell. */
	memcpy(amr->amr_mbox, &ac->ac_mbox, sizeof(ac->ac_mbox));

	ac->ac_flags |= AC_ACTIVE;
	amr_outl(amr, AMR_QREG_IDB, amr->amr_mbox_paddr | AMR_QIDB_SUBMIT);
	DELAY(10);
	return (0);
}

int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0)
		return (EBUSY);

	/* Copy the command into the shared mailbox and post it. */
	memcpy(amr->amr_mbox, &ac->ac_mbox, sizeof(ac->ac_mbox));

	ac->ac_flags |= AC_ACTIVE;
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}

/*
 * Claim any work that the controller has completed; acknowledge completion,
 * save details of the completion in (mbsave).  Must be called with
 * interrupts blocked.  Returns 0 if work was claimed, non-zero otherwise.
 */
int
amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox *mbsave)
{
	u_int32_t v;

	if (amr_mbox_wait(amr))
		return (EBUSY);

	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & (AMR_QIDB_SUBMIT | AMR_QIDB_ACK)) != 0)
		return (EBUSY);

	/* Work waiting for us? */
	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
		return (-1);

	/* Save the mailbox, which contains a list of completed commands. */
	memcpy(mbsave, amr->amr_mbox, sizeof(*mbsave));

	/* Ack the interrupt and mailbox transfer. */
	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
	amr_outl(amr, AMR_QREG_IDB, amr->amr_mbox_paddr | AMR_QIDB_ACK);
	DELAY(10);

#if 0
	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
		;
#endif

	return (0);
}

int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox *mbsave)
{
	u_int8_t istat;

	if (amr_mbox_wait(amr))
		return (EBUSY);

	/* Puke if the mailbox is busy. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0)
		return (-1);

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, amr->amr_mbox, sizeof(*mbsave));

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}