1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 * 18 * 19 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 20 * 21 * This code is derived from software contributed to The DragonFly Project 22 * by Matthew Dillon <dillon@backplane.com> 23 * 24 * Redistribution and use in source and binary forms, with or without 25 * modification, are permitted provided that the following conditions 26 * are met: 27 * 28 * 1. Redistributions of source code must retain the above copyright 29 * notice, this list of conditions and the following disclaimer. 30 * 2. Redistributions in binary form must reproduce the above copyright 31 * notice, this list of conditions and the following disclaimer in 32 * the documentation and/or other materials provided with the 33 * distribution. 34 * 3. Neither the name of The DragonFly Project nor the names of its 35 * contributors may be used to endorse or promote products derived 36 * from this software without specific, prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $OpenBSD: ahci.c,v 1.147 2009/02/16 21:19:07 miod Exp $
 */

#include "ahci.h"

/*
 * PCI front-end entry points.  The per-chipset attach variants apply
 * quirk flags to the softc and then fall through to the common
 * ahci_pci_attach(); all share the common ahci_pci_detach().
 */
static int	ahci_vt8251_attach(device_t);
static int	ahci_ati_sb600_attach(device_t);
static int	ahci_nvidia_mcp_attach(device_t);
static int	ahci_pci_attach(device_t);
static int	ahci_pci_detach(device_t);

/*
 * Explicitly recognized controllers that need a quirked attach path.
 * The final entry (vendor 0) terminates the table scan and doubles as
 * the generic match for any other device whose PCI class codes
 * identify it as AHCI (see ahci_lookup_device()).
 */
static const struct ahci_device ahci_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA,
	  ahci_vt8251_attach, ahci_pci_detach, "ViaTech-VT8251-SATA" },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA,
	  ahci_ati_sb600_attach, ahci_pci_detach, "ATI-SB600-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2,
	  ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP65-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1,
	  ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP67-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5,
	  ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP77-SATA" },
	{ 0, 0,
	  ahci_pci_attach, ahci_pci_detach, "AHCI-PCI-SATA" }
};

/*
 * Vendor/device/revision triple used for quirk matching.
 * ahci_rev < 0 matches any revision of the given device.
 */
struct ahci_pciid {
	uint16_t	ahci_vid;
	uint16_t	ahci_did;
	int		ahci_rev;
};

/*
 * Controllers for which MSI is not used; ahci_pci_attach() forces
 * these back onto a legacy line interrupt.
 */
static const struct
ahci_pciid ahci_msi_blacklist[] = {
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA, -1 },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_AHCI, -1 },

	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121, -1 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145, -1 },

	/* MCP65: only revisions 0xa1 and 0xa2 are blacklisted */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa1 },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa2 }
};

/* Loader tunable: set hw.ahci.msi.enable=0 to globally disable MSI. */
static int ahci_msi_enable = 1;
TUNABLE_INT("hw.ahci.msi.enable", &ahci_msi_enable);

/*
 * Match during probe and attach.  The device does not yet have a softc.
 *
 * Returns the ahci_devices[] entry for an exact vendor/product match,
 * the generic terminating entry when the device's class/subclass/progif
 * identify it as an AHCI 1.0 controller, or NULL when the device is
 * neither explicitly known nor generically AHCI.
 */
const struct ahci_device *
ahci_lookup_device(device_t dev)
{
	const struct ahci_device *ad;
	u_int16_t vendor = pci_get_vendor(dev);
	u_int16_t product = pci_get_device(dev);
	u_int8_t class = pci_get_class(dev);
	u_int8_t subclass = pci_get_subclass(dev);
	u_int8_t progif = pci_read_config(dev, PCIR_PROGIF, 1);
	int is_ahci;

	/*
	 * Generally speaking if the pci device does not identify as
	 * AHCI we skip it.
	 */
	if (class == PCIC_STORAGE && subclass == PCIS_STORAGE_SATA &&
	    progif == PCIP_STORAGE_SATA_AHCI_1_0) {
		is_ahci = 1;
	} else {
		is_ahci = 0;
	}

	/*
	 * Explicit vendor/product matches first; these may apply even
	 * when the class codes do not advertise AHCI (e.g. SB600 in
	 * IDE mode, see ahci_ati_sb600_attach()).
	 */
	for (ad = &ahci_devices[0]; ad->ad_vendor; ++ad) {
		if (ad->ad_vendor == vendor && ad->ad_product == product)
			return (ad);
	}

	/*
	 * Last ad is the default match if the PCI device matches SATA.
	 */
	if (is_ahci == 0)
		ad = NULL;
	return (ad);
}

/*
 * Attach functions.  They all eventually fall through to ahci_pci_attach().
 */
static int
ahci_vt8251_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);

	/* NCQ capability is masked off later in ahci_pci_attach() */
	sc->sc_flags |= AHCI_F_NO_NCQ;
	return (ahci_pci_attach(dev));
}

static int
ahci_ati_sb600_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	pcireg_t magic;
	u_int8_t subclass = pci_get_subclass(dev);
	u_int8_t revid;

	/*
	 * If the chip powered up in IDE mode, rewrite its PCI class
	 * code to SATA/AHCI.  The write to the class-code/revid dword
	 * is bracketed by setting and restoring a "magic" register
	 * bit, which presumably unlocks the otherwise read-only class
	 * code (NOTE(review): behavior inferred from the LOCKED flag
	 * name; not documented here).
	 */
	if (subclass == PCIS_STORAGE_IDE) {
		revid = pci_read_config(dev, PCIR_REVID, 1);
		magic = pci_read_config(dev, AHCI_PCI_ATI_SB600_MAGIC, 4);
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC,
		    magic | AHCI_PCI_ATI_SB600_LOCKED, 4);
		pci_write_config(dev, PCIR_REVID,
		    (PCIC_STORAGE << 24) |
		    (PCIS_STORAGE_SATA << 16) |
		    (PCIP_STORAGE_SATA_AHCI_1_0 << 8) |
		    revid, 4);
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC, magic, 4);
	}

	sc->sc_flags |= AHCI_F_IGN_FR;
	return (ahci_pci_attach(dev));
}

static int
ahci_nvidia_mcp_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);

	sc->sc_flags |= AHCI_F_IGN_FR;
	return (ahci_pci_attach(dev));
}

/*
 * Common attach path: map the IRQ and register BAR, initialize the
 * chipset, create the DMA tags, allocate the ports, hook up the
 * interrupt, and synchronize with the per-port probe threads so that
 * device attach order is deterministic.
 */
static int
ahci_pci_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	struct ahci_port *ap;
	const char *gen;
	uint16_t vid, did;
	u_int32_t pi, reg;
	u_int32_t cap, cap2;
	u_int irq_flags;
	bus_addr_t addr;
	int i, error, msi_enable,
	    rev, fbs;
	const char *revision;

	/*
	 * 0x0400 is the INTx-disable bit of the PCI command register;
	 * clear it if the BIOS left it set.
	 */
	if (pci_read_config(dev, PCIR_COMMAND, 2) & 0x0400) {
		device_printf(dev, "BIOS disabled PCI interrupt, "
				   "re-enabling\n");
		pci_write_config(dev, PCIR_COMMAND,
		    pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
	}

	sc->sc_dev = dev;

	/*
	 * Map the AHCI controller's IRQ and BAR(5) (hardware registers)
	 */

	msi_enable = ahci_msi_enable;

	/*
	 * Force legacy interrupts for controllers on the MSI blacklist
	 * (revision-specific when ahci_rev >= 0).
	 */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	rev = pci_get_revid(dev);
	for (i = 0; i < NELEM(ahci_msi_blacklist); ++i) {
		const struct ahci_pciid *id = &ahci_msi_blacklist[i];

		if (vid == id->ahci_vid && did == id->ahci_did) {
			if (id->ahci_rev < 0 || id->ahci_rev == rev) {
				msi_enable = 0;
				break;
			}
		}
	}

	sc->sc_irq_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->sc_rid_irq, &irq_flags);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_rid_irq,
	    irq_flags);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "unable to map interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * When mapping the register window store the tag and handle
	 * separately so we can use the tag with per-port bus handle
	 * sub-spaces.
	 */
	sc->sc_rid_regs = PCIR_BAR(5);
	sc->sc_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_rid_regs, RF_ACTIVE);
	if (sc->sc_regs == NULL) {
		device_printf(dev, "unable to map registers\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}
	sc->sc_iot = rman_get_bustag(sc->sc_regs);
	sc->sc_ioh = rman_get_bushandle(sc->sc_regs);

	/*
	 * Initialize the chipset and then set the interrupt vector up
	 */
	error = ahci_init(sc);
	if (error) {
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Get the AHCI capabilities and max number of concurrent
	 * command tags and set up the DMA tags.  Adjust the saved
	 * sc_cap according to override flags.
	 */
	cap = sc->sc_cap = ahci_read(sc, AHCI_REG_CAP);
	if (sc->sc_flags & AHCI_F_NO_NCQ)
		sc->sc_cap &= ~AHCI_REG_CAP_SNCQ;
	if (sc->sc_flags & AHCI_F_FORCE_FBSS)
		sc->sc_cap |= AHCI_REG_CAP_FBSS;

	/*
	 * We assume at least 4 commands.
	 */
	sc->sc_ncmds = AHCI_REG_CAP_NCS(cap);
	if (sc->sc_ncmds < 4) {
		device_printf(dev, "NCS must probe a value >= 4\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* Only use addresses above 4G when the HBA supports 64-bit DMA */
	addr = (cap & AHCI_REG_CAP_S64A) ?
	    BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT;

	/*
	 * DMA tags for allocation of DMA memory buffers, lists, and so
	 * forth.  These are typically per-port.
	 *
	 * When FIS-based switching is supported we need a rfis for
	 * each target (4K total).  The spec also requires 4K alignment
	 * for this case.
	 */
	fbs = (cap & AHCI_REG_CAP_FBSS) ? 16 : 1;
	error = 0;

	/* Received-FIS area: one rfis, or 16 with FIS-based switching */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			256 * fbs,			/* alignment */
			PAGE_SIZE,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sizeof(struct ahci_rfis) * fbs,	/* [max]size */
			1,				/* maxsegs */
			sizeof(struct ahci_rfis) * fbs,	/* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_rfis);		/* return tag */

	/* Command header list, one entry per command slot */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			32,				/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sc->sc_ncmds * sizeof(struct ahci_cmd_hdr),
			1,				/* maxsegs */
			sc->sc_ncmds * sizeof(struct ahci_cmd_hdr),
			0,				/* flags */
			&sc->sc_tag_cmdh);		/* return tag */

	/*
	 * NOTE: ahci_cmd_table is sized to a power of 2
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			sizeof(struct ahci_cmd_table),	/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			1,				/* maxsegs */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			0,				/* flags */
			&sc->sc_tag_cmdt);		/* return tag */

	/*
	 * The data tag is used for later dmamaps and not immediately
	 * allocated.
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			4,				/* alignment */
			0,				/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			4096 * 1024,			/* maxiosize */
			AHCI_MAX_PRDT,			/* maxsegs */
			65536,				/* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_data);		/* return tag */

	if (error) {
		device_printf(dev, "unable to create dma tags\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* Decode the interface speed support field for the banner */
	switch (cap & AHCI_REG_CAP_ISS) {
	case AHCI_REG_CAP_ISS_G1:
		gen = "1 (1.5Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G2:
		gen = "2 (3Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G3:
		gen = "3 (6Gbps)";
		break;
	default:
		gen = "unknown";
		break;
	}

	/* check the revision */
	reg = ahci_read(sc, AHCI_REG_VS);

	switch (reg) {
	case AHCI_REG_VS_0_95:
		revision = "AHCI 0.95";
		break;
	case AHCI_REG_VS_1_0:
		revision = "AHCI 1.0";
		break;
	case AHCI_REG_VS_1_1:
		revision = "AHCI 1.1";
		break;
	case AHCI_REG_VS_1_2:
		revision = "AHCI 1.2";
		break;
	case AHCI_REG_VS_1_3:
		revision = "AHCI 1.3";
		break;
	case AHCI_REG_VS_1_4:
		revision = "AHCI 1.4";
		break;
	case AHCI_REG_VS_1_5:
		revision = "AHCI 1.5";	/* future will catch up to us */
		break;
	default:
		device_printf(sc->sc_dev,
			      "Warning: Unknown AHCI revision 0x%08x\n", reg);
		revision = "AHCI <unknown>";
		break;
	}
	sc->sc_vers = reg;

	/* The extended capabilities register only exists from AHCI 1.3 on */
	if (reg >= AHCI_REG_VS_1_3) {
		cap2 = ahci_read(sc, AHCI_REG_CAP2);
		device_printf(dev,
			      "%s cap 0x%b cap2 0x%b, %d ports, "
			      "%d tags/port, gen %s\n",
			      revision,
			      cap, AHCI_FMT_CAP,
			      cap2, AHCI_FMT_CAP2,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	} else {
		cap2 = 0;
		device_printf(dev,
			      "%s cap 0x%b, %d ports, "
			      "%d tags/port, gen %s\n",
			      revision,
			      cap, AHCI_FMT_CAP,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	}
	sc->sc_cap2 = cap2;

	pi = ahci_read(sc, AHCI_REG_PI);
	DPRINTF(AHCI_D_VERBOSE, "%s: ports implemented: 0x%08x\n",
	    DEVNAME(sc), pi);

#ifdef AHCI_COALESCE
	/* Naive coalescing support - enable for all ports. */
	if (cap & AHCI_REG_CAP_CCCS) {
		u_int16_t		ccc_timeout = 20;
		u_int8_t		ccc_numcomplete = 12;
		u_int32_t		ccc_ctl;

		/* disable coalescing during reconfiguration. */
		ccc_ctl = ahci_read(sc, AHCI_REG_CCC_CTL);
		ccc_ctl &= ~0x00000001;
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);

		sc->sc_ccc_mask = 1 << AHCI_REG_CCC_CTL_INT(ccc_ctl);
		if (pi & sc->sc_ccc_mask) {
			/* A conflict with the implemented port list? */
			printf("%s: coalescing interrupt/implemented port list "
			    "conflict, PI: %08x, ccc_mask: %08x\n",
			    DEVNAME(sc), pi, sc->sc_ccc_mask);
			sc->sc_ccc_mask = 0;
			goto noccc;
		}

		/* ahci_port_start will enable each port when it starts. */
		sc->sc_ccc_ports = pi;
		sc->sc_ccc_ports_cur = 0;

		/* program thresholds and enable overall coalescing. */
		ccc_ctl &= ~0xffffff00;
		ccc_ctl |= (ccc_timeout << 16) | (ccc_numcomplete << 8);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);
		ahci_write(sc, AHCI_REG_CCC_PORTS, 0);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl | 1);
	}
noccc:
#endif
	/*
	 * Allocate per-port resources
	 *
	 * Ignore attach errors, leave the port intact for
	 * rescan and continue the loop.
	 *
	 * All ports are attached in parallel but the CAM scan-bus
	 * is held up until all ports are attached so we get a deterministic
	 * order.
	 */
	for (i = 0; error == 0 && i < AHCI_MAX_PORTS; i++) {
		if ((pi & (1 << i)) == 0) {
			/* dont allocate stuff if the port isnt implemented */
			continue;
		}
		error = ahci_port_alloc(sc, i);
	}

	/*
	 * Setup the interrupt vector and enable interrupts.  Note that
	 * since the irq may be shared we do not set it up until we are
	 * ready to go.
	 */
	if (error == 0) {
		error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
				       ahci_intr, sc,
				       &sc->sc_irq_handle, NULL);
	}

	if (error) {
		device_printf(dev, "unable to install interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Before marking the sc as good, which allows the interrupt
	 * subsystem to operate on the ports, wait for all the port threads
	 * to get past their initial pre-probe init.  Otherwise an interrupt
	 * may try to process the port before it has been initialized.
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			while (ap->ap_signal & AP_SIGF_THREAD_SYNC)
				tsleep(&ap->ap_signal, 0, "ahprb1", hz);
		}
	}

	/*
	 * Master interrupt enable, and call ahci_intr() in case we race
	 * our AHCI_F_INT_GOOD flag.
	 */
	crit_enter();
	ahci_write(sc, AHCI_REG_GHC, AHCI_REG_GHC_AE | AHCI_REG_GHC_IE);
	sc->sc_flags |= AHCI_F_INT_GOOD;
	crit_exit();
	ahci_intr(sc);

	/*
	 * All ports are probing in parallel.  Wait for them to finish
	 * and then issue the cam attachment and bus scan serially so
	 * the 'da' assignments are deterministic.
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			/* wait for the port's probe thread to finish */
			while (ap->ap_signal & AP_SIGF_INIT)
				tsleep(&ap->ap_signal, 0, "ahprb2", hz);
			ahci_os_lock_port(ap);
			if (ahci_cam_attach(ap) == 0) {
				ahci_cam_changed(ap, NULL, -1);
				ahci_os_unlock_port(ap);
				/* block until this port's bus scan is done */
				while ((ap->ap_flags & AP_F_SCAN_COMPLETED) == 0) {
					tsleep(&ap->ap_flags, 0, "ahprb2", hz);
				}
			} else {
				ahci_os_unlock_port(ap);
			}
		}
	}

	return(0);
}

/*
 * Device unload / detachment
 *
 * Also used by ahci_pci_attach() to unwind a partial attach, so every
 * step must tolerate resources that were never allocated.
 */
static int
ahci_pci_detach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	struct ahci_port *ap;
	int i;

	/*
	 * Disable the controller and de-register the interrupt, if any.
	 *
	 * XXX interlock last interrupt?
	 */
	sc->sc_flags &= ~AHCI_F_INT_GOOD;
	if (sc->sc_regs)
		ahci_write(sc, AHCI_REG_GHC, 0);

	if (sc->sc_irq_handle) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	/*
	 * Free port structures and DMA memory
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		ap = sc->sc_ports[i];
		if (ap) {
			ahci_cam_detach(ap);
			ahci_port_free(sc, i);
		}
	}

	/*
	 * Clean up the bus space
	 */
	if (sc->sc_irq) {
		bus_release_resource(dev, SYS_RES_IRQ,
				     sc->sc_rid_irq, sc->sc_irq);
		sc->sc_irq = NULL;
	}

	/* the MSI message was allocated by pci_alloc_1intr() */
	if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->sc_regs) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     sc->sc_rid_regs, sc->sc_regs);
		sc->sc_regs = NULL;
	}

	/* destroy the DMA tags created in ahci_pci_attach() */
	if (sc->sc_tag_rfis) {
		bus_dma_tag_destroy(sc->sc_tag_rfis);
		sc->sc_tag_rfis = NULL;
	}
	if (sc->sc_tag_cmdh) {
		bus_dma_tag_destroy(sc->sc_tag_cmdh);
		sc->sc_tag_cmdh = NULL;
	}
	if (sc->sc_tag_cmdt) {
		bus_dma_tag_destroy(sc->sc_tag_cmdt);
		sc->sc_tag_cmdt = NULL;
	}
	if (sc->sc_tag_data) {
		bus_dma_tag_destroy(sc->sc_tag_data);
		sc->sc_tag_data = NULL;
	}

	return (0);
}