1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 * 18 * 19 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 20 * 21 * This code is derived from software contributed to The DragonFly Project 22 * by Matthew Dillon <dillon@backplane.com> 23 * 24 * Redistribution and use in source and binary forms, with or without 25 * modification, are permitted provided that the following conditions 26 * are met: 27 * 28 * 1. Redistributions of source code must retain the above copyright 29 * notice, this list of conditions and the following disclaimer. 30 * 2. Redistributions in binary form must reproduce the above copyright 31 * notice, this list of conditions and the following disclaimer in 32 * the documentation and/or other materials provided with the 33 * distribution. 34 * 3. Neither the name of The DragonFly Project nor the names of its 35 * contributors may be used to endorse or promote products derived 36 * from this software without specific, prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $OpenBSD: ahci.c,v 1.147 2009/02/16 21:19:07 miod Exp $
 */

#include "ahci.h"

/* Per-chipset attach/detach entry points; all fall into ahci_pci_attach() */
static int	ahci_vt8251_attach(device_t);
static int	ahci_ati_sb600_attach(device_t);
static int	ahci_nvidia_mcp_attach(device_t);
static int	ahci_pci_attach(device_t);
static int	ahci_pci_detach(device_t);

/*
 * Table of chipsets needing quirked attach handlers, matched by PCI
 * vendor/product in ahci_lookup_device().  The terminating entry has a
 * zero vendor id and doubles as the generic catch-all used for any
 * device that identifies itself as AHCI via its PCI class codes.
 */
static const struct ahci_device ahci_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA,
	    ahci_vt8251_attach, ahci_pci_detach, "ViaTech-VT8251-SATA" },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA,
	    ahci_ati_sb600_attach, ahci_pci_detach, "ATI-SB600-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP65-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP67-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP77-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_1,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP79-SATA" },
	{ 0, 0,
	    ahci_pci_attach, ahci_pci_detach, "AHCI-PCI-SATA" }
};

/*
 * PCI vendor/device/revision triple used for the MSI blacklist below.
 * A negative ahci_rev matches any revision.
 */
struct ahci_pciid {
	uint16_t	ahci_vid;
	uint16_t	ahci_did;
	int		ahci_rev;
};

/*
 * Controllers on which MSI is known to be broken; ahci_pci_attach()
 * forces legacy interrupts for these.
 */
static const struct ahci_pciid ahci_msi_blacklist[] = {
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA, -1 },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_AHCI, -1 },

	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121, -1 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145, -1 },

	/* MCP65 revisions 0xa1 and 0xa2 only */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa1 },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa2 }
};

/* Global MSI enable, overridable via the hw.ahci.msi.enable tunable */
static int	ahci_msi_enable = 1;
TUNABLE_INT("hw.ahci.msi.enable", &ahci_msi_enable);

/*
 * Match during probe and attach.  The device does not yet have a softc.
116 */ 117 const struct ahci_device * 118 ahci_lookup_device(device_t dev) 119 { 120 const struct ahci_device *ad; 121 u_int16_t vendor = pci_get_vendor(dev); 122 u_int16_t product = pci_get_device(dev); 123 u_int8_t class = pci_get_class(dev); 124 u_int8_t subclass = pci_get_subclass(dev); 125 u_int8_t progif = pci_read_config(dev, PCIR_PROGIF, 1); 126 int is_ahci; 127 128 /* 129 * Generally speaking if the pci device does not identify as 130 * AHCI we skip it. 131 */ 132 if (class == PCIC_STORAGE && subclass == PCIS_STORAGE_SATA && 133 progif == PCIP_STORAGE_SATA_AHCI_1_0) { 134 is_ahci = 1; 135 } else { 136 is_ahci = 0; 137 } 138 139 for (ad = &ahci_devices[0]; ad->ad_vendor; ++ad) { 140 if (ad->ad_vendor == vendor && ad->ad_product == product) 141 return (ad); 142 } 143 144 /* 145 * Last ad is the default match if the PCI device matches SATA. 146 */ 147 if (is_ahci == 0) 148 ad = NULL; 149 return (ad); 150 } 151 152 /* 153 * Attach functions. They all eventually fall through to ahci_pci_attach(). 
 */

/*
 * VT8251: NCQ is broken on this chipset; mask it off before the common
 * attach path reads the capability register.
 */
static int
ahci_vt8251_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);

	sc->sc_flags |= AHCI_F_NO_NCQ;
	return (ahci_pci_attach(dev));
}

/*
 * ATI SB600: if the BIOS left the controller in IDE mode, rewrite the
 * class/subclass/progif in config space (via the magic unlock register)
 * so it identifies as AHCI, then attach with the FR-ignore quirk.
 */
static int
ahci_ati_sb600_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	pcireg_t magic;
	u_int8_t subclass = pci_get_subclass(dev);
	u_int8_t revid;

	if (subclass == PCIS_STORAGE_IDE) {
		revid = pci_read_config(dev, PCIR_REVID, 1);
		/*
		 * Unlock the config space write, rewrite the class codes
		 * (preserving the revision id), then restore the magic
		 * register to re-lock.  Write order matters here.
		 */
		magic = pci_read_config(dev, AHCI_PCI_ATI_SB600_MAGIC, 4);
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC,
				 magic | AHCI_PCI_ATI_SB600_LOCKED, 4);
		pci_write_config(dev, PCIR_REVID,
				 (PCIC_STORAGE << 24) |
				 (PCIS_STORAGE_SATA << 16) |
				 (PCIP_STORAGE_SATA_AHCI_1_0 << 8) |
				 revid, 4);
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC, magic, 4);
	}

	sc->sc_flags |= AHCI_F_IGN_FR;
	return (ahci_pci_attach(dev));
}

/*
 * NVidia MCP: attach with the FR-ignore quirk.
 */
static int
ahci_nvidia_mcp_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);

	sc->sc_flags |= AHCI_F_IGN_FR;
	return (ahci_pci_attach(dev));
}

/*
 * Common attach path for all supported controllers.
 *
 * Maps the IRQ and BAR(5) register window, initializes the chipset,
 * creates the per-controller DMA tags, allocates all implemented ports,
 * installs the interrupt handler, and finally runs the CAM attach/scan
 * for each port serially (so unit assignments are deterministic).
 *
 * Returns 0 on success or ENXIO; all failure paths clean up through
 * ahci_pci_detach().
 */
static int
ahci_pci_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	struct ahci_port *ap;
	const char *gen;
	uint16_t vid, did;
	u_int32_t pi, reg;
	u_int32_t cap, cap2;
	u_int irq_flags;
	bus_addr_t addr;
	int i, error, msi_enable, rev, fbs;
	char revbuf[32];

	/*
	 * Clear the PCI command register's Interrupt Disable bit (0x0400)
	 * if the BIOS left it set.
	 */
	if (pci_read_config(dev, PCIR_COMMAND, 2) & 0x0400) {
		device_printf(dev, "BIOS disabled PCI interrupt, "
				   "re-enabling\n");
		pci_write_config(dev, PCIR_COMMAND,
		    pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
	}

	sc->sc_dev = dev;

	/*
	 * Map the AHCI controller's IRQ and BAR(5) (hardware registers)
	 */
	msi_enable = ahci_msi_enable;

	/*
	 * Disable MSI for controllers on the blacklist (a negative
	 * ahci_rev entry matches any revision).
	 */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	rev = pci_get_revid(dev);
	for (i = 0; i < NELEM(ahci_msi_blacklist); ++i) {
		const struct ahci_pciid *id = &ahci_msi_blacklist[i];

		if (vid == id->ahci_vid && did == id->ahci_did) {
			if (id->ahci_rev < 0 || id->ahci_rev == rev) {
				msi_enable = 0;
				break;
			}
		}
	}

	sc->sc_irq_type = pci_alloc_1intr(dev, msi_enable,
					  &sc->sc_rid_irq, &irq_flags);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_rid_irq,
					    irq_flags);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "unable to map interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * When mapping the register window store the tag and handle
	 * separately so we can use the tag with per-port bus handle
	 * sub-spaces.
	 */
	sc->sc_rid_regs = PCIR_BAR(5);
	sc->sc_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->sc_rid_regs, RF_ACTIVE);
	if (sc->sc_regs == NULL) {
		device_printf(dev, "unable to map registers\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}
	sc->sc_iot = rman_get_bustag(sc->sc_regs);
	sc->sc_ioh = rman_get_bushandle(sc->sc_regs);

	/*
	 * Initialize the chipset and then set the interrupt vector up
	 */
	error = ahci_init(sc);
	if (error) {
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Get the AHCI capabilities and max number of concurrent
	 * command tags and set up the DMA tags.  Adjust the saved
	 * sc_cap according to override flags.
	 */
	cap = sc->sc_cap = ahci_read(sc, AHCI_REG_CAP);
	if (sc->sc_flags & AHCI_F_NO_NCQ)
		sc->sc_cap &= ~AHCI_REG_CAP_SNCQ;
	if (sc->sc_flags & AHCI_F_FORCE_FBSS)
		sc->sc_cap |= AHCI_REG_CAP_FBSS;

	/*
	 * We assume at least 4 commands.
	 */
	sc->sc_ncmds = AHCI_REG_CAP_NCS(cap);
	if (sc->sc_ncmds < 4) {
		device_printf(dev, "NCS must probe a value >= 4\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* 64-bit DMA addressing only if the controller advertises S64A */
	addr = (cap & AHCI_REG_CAP_S64A) ?
		BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT;

	/*
	 * DMA tags for allocation of DMA memory buffers, lists, and so
	 * forth.  These are typically per-port.
	 *
	 * When FIS-based switching is supported we need a rfis for
	 * each target (4K total).  The spec also requires 4K alignment
	 * for this case.
	 */
	fbs = (cap & AHCI_REG_CAP_FBSS) ? 16 : 1;
	error = 0;

	/* Received-FIS area, one rfis per target when FBS is available */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			256 * fbs,			/* alignment */
			PAGE_SIZE,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sizeof(struct ahci_rfis) * fbs, /* [max]size */
			1,				/* maxsegs */
			sizeof(struct ahci_rfis) * fbs, /* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_rfis);		/* return tag */

	/* Command-list header array, one header per command slot */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			32,				/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sc->sc_ncmds * sizeof(struct ahci_cmd_hdr),
			1,				/* maxsegs */
			sc->sc_ncmds * sizeof(struct ahci_cmd_hdr),
			0,				/* flags */
			&sc->sc_tag_cmdh);		/* return tag */

	/*
	 * NOTE: ahci_cmd_table is sized to a power of 2
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			sizeof(struct ahci_cmd_table),	/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			1,				/* maxsegs */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			0,				/* flags */
			&sc->sc_tag_cmdt);		/* return tag */

	/*
	 * The data tag is used for later dmamaps and not immediately
	 * allocated.
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			4,				/* alignment */
			0,				/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			NULL,				/* filter */
			NULL,				/* filterarg */
			4096 * 1024,			/* maxiosize */
			AHCI_MAX_PRDT,			/* maxsegs */
			65536,				/* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_data);		/* return tag */

	if (error) {
		device_printf(dev, "unable to create dma tags\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* Decode the interface speed support field for the banner */
	switch (cap & AHCI_REG_CAP_ISS) {
	case AHCI_REG_CAP_ISS_G1:
		gen = "1 (1.5Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G2:
		gen = "2 (3Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G3:
		gen = "3 (6Gbps)";
		break;
	default:
		gen = "unknown";
		break;
	}

	/* check the revision */
	reg = ahci_read(sc, AHCI_REG_VS);

	/* low byte non-zero means a three-part version (e.g. AHCI 1.3.1) */
	if (reg & 0x0000FF) {
		ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d.%d",
			  (reg >> 16), (uint8_t)(reg >> 8), (uint8_t)reg);
	} else {
		ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d",
			  (reg >> 16), (uint8_t)(reg >> 8));
	}
	sc->sc_vers = reg;

	/* CAP2 only exists on AHCI 1.3 and later controllers */
	if (reg >= AHCI_REG_VS_1_3) {
		cap2 = ahci_read(sc, AHCI_REG_CAP2);
		device_printf(dev,
			      "%s cap 0x%b cap2 0x%b, %d ports, "
			      "%d tags/port, gen %s\n",
			      revbuf,
			      cap, AHCI_FMT_CAP,
			      cap2, AHCI_FMT_CAP2,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	} else {
		cap2 = 0;
		device_printf(dev,
			      "%s cap 0x%b, %d ports, "
			      "%d tags/port, gen %s\n",
			      revbuf,
			      cap, AHCI_FMT_CAP,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	}
	sc->sc_cap2 = cap2;

	pi = ahci_read(sc, AHCI_REG_PI);
	DPRINTF(AHCI_D_VERBOSE, "%s: ports implemented: 0x%08x\n",
		DEVNAME(sc), pi);

#ifdef AHCI_COALESCE
	/* Naive coalescing support - enable for all ports. */
	if (cap & AHCI_REG_CAP_CCCS) {
		u_int16_t		ccc_timeout = 20;
		u_int8_t		ccc_numcomplete = 12;
		u_int32_t		ccc_ctl;

		/* disable coalescing during reconfiguration. */
		ccc_ctl = ahci_read(sc, AHCI_REG_CCC_CTL);
		ccc_ctl &= ~0x00000001;
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);

		sc->sc_ccc_mask = 1 << AHCI_REG_CCC_CTL_INT(ccc_ctl);
		if (pi & sc->sc_ccc_mask) {
			/* A conflict with the implemented port list? */
			printf("%s: coalescing interrupt/implemented port list "
			    "conflict, PI: %08x, ccc_mask: %08x\n",
			    DEVNAME(sc), pi, sc->sc_ccc_mask);
			sc->sc_ccc_mask = 0;
			goto noccc;
		}

		/* ahci_port_start will enable each port when it starts. */
		sc->sc_ccc_ports = pi;
		sc->sc_ccc_ports_cur = 0;

		/* program thresholds and enable overall coalescing. */
		ccc_ctl &= ~0xffffff00;
		ccc_ctl |= (ccc_timeout << 16) | (ccc_numcomplete << 8);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);
		ahci_write(sc, AHCI_REG_CCC_PORTS, 0);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl | 1);
	}
noccc:
#endif
	/*
	 * Allocate per-port resources
	 *
	 * Ignore attach errors, leave the port intact for
	 * rescan and continue the loop.
	 *
	 * All ports are attached in parallel but the CAM scan-bus
	 * is held up until all ports are attached so we get a deterministic
	 * order.
	 */
	for (i = 0; error == 0 && i < AHCI_MAX_PORTS; i++) {
		if ((pi & (1 << i)) == 0) {
			/* don't allocate stuff if the port isn't implemented */
			continue;
		}
		error = ahci_port_alloc(sc, i);
	}

	/*
	 * Setup the interrupt vector and enable interrupts.  Note that
	 * since the irq may be shared we do not set it up until we are
	 * ready to go.
	 */
	if (error == 0) {
		error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
				       ahci_intr, sc,
				       &sc->sc_irq_handle, NULL);
	}

	if (error) {
		device_printf(dev, "unable to install interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Before marking the sc as good, which allows the interrupt
	 * subsystem to operate on the ports, wait for all the port threads
	 * to get past their initial pre-probe init.  Otherwise an interrupt
	 * may try to process the port before it has been initialized.
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			while (ap->ap_signal & AP_SIGF_THREAD_SYNC)
				tsleep(&ap->ap_signal, 0, "ahprb1", hz);
		}
	}

	/*
	 * Master interrupt enable, and call ahci_intr() in case we race
	 * our AHCI_F_INT_GOOD flag.
	 */
	crit_enter();
	ahci_write(sc, AHCI_REG_GHC, AHCI_REG_GHC_AE | AHCI_REG_GHC_IE);
	sc->sc_flags |= AHCI_F_INT_GOOD;
	crit_exit();
	ahci_intr(sc);

	/*
	 * All ports are probing in parallel.  Wait for them to finish
	 * and then issue the cam attachment and bus scan serially so
	 * the 'da' assignments are deterministic.
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			while (ap->ap_signal & AP_SIGF_INIT)
				tsleep(&ap->ap_signal, 0, "ahprb2", hz);
			ahci_os_lock_port(ap);
			if (ahci_cam_attach(ap) == 0) {
				ahci_cam_changed(ap, NULL, -1);
				ahci_os_unlock_port(ap);
				while ((ap->ap_flags & AP_F_SCAN_COMPLETED) == 0) {
					tsleep(&ap->ap_flags, 0, "ahprb2", hz);
				}
			} else {
				ahci_os_unlock_port(ap);
			}
		}
	}

	return(0);
}

/*
 * Device unload / detachment
 *
 * Also used by ahci_pci_attach() failure paths; every resource field is
 * checked against NULL/zero so a partially-attached softc unwinds safely.
 */
static int
ahci_pci_detach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	struct ahci_port *ap;
	int i;

	/*
	 * Disable the controller and de-register the interrupt, if any.
	 *
	 * XXX interlock last interrupt?
	 */
	sc->sc_flags &= ~AHCI_F_INT_GOOD;
	if (sc->sc_regs)
		ahci_write(sc, AHCI_REG_GHC, 0);

	if (sc->sc_irq_handle) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	/*
	 * Free port structures and DMA memory
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		ap = sc->sc_ports[i];
		if (ap) {
			ahci_cam_detach(ap);
			ahci_port_free(sc, i);
		}
	}

	/*
	 * Clean up the bus space
	 */
	if (sc->sc_irq) {
		bus_release_resource(dev, SYS_RES_IRQ,
				     sc->sc_rid_irq, sc->sc_irq);
		sc->sc_irq = NULL;
	}

	/* pci_alloc_1intr() may have allocated an MSI; release it */
	if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->sc_regs) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     sc->sc_rid_regs, sc->sc_regs);
		sc->sc_regs = NULL;
	}

	/* Destroy the DMA tags created during attach */
	if (sc->sc_tag_rfis) {
		bus_dma_tag_destroy(sc->sc_tag_rfis);
		sc->sc_tag_rfis = NULL;
	}
	if (sc->sc_tag_cmdh) {
		bus_dma_tag_destroy(sc->sc_tag_cmdh);
		sc->sc_tag_cmdh = NULL;
	}
	if (sc->sc_tag_cmdt) {
		bus_dma_tag_destroy(sc->sc_tag_cmdt);
		sc->sc_tag_cmdt = NULL;
	}
	if (sc->sc_tag_data) {
		bus_dma_tag_destroy(sc->sc_tag_data);
		sc->sc_tag_data = NULL;
	}

	return (0);
}