/*
 * (MPSAFE)
 *
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
37 * 38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 39 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 41 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 42 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 43 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 44 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 45 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 46 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 47 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 48 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 49 * SUCH DAMAGE. 50 * 51 * $OpenBSD: ahci.c,v 1.147 2009/02/16 21:19:07 miod Exp $ 52 */ 53 54 #include "ahci.h" 55 56 static int ahci_vt8251_attach(device_t); 57 static int ahci_ati_sb600_attach(device_t); 58 static int ahci_nvidia_mcp_attach(device_t); 59 static int ahci_pci_attach(device_t); 60 static int ahci_pci_detach(device_t); 61 62 static const struct ahci_device ahci_devices[] = { 63 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA, 64 ahci_vt8251_attach, ahci_pci_detach, "ViaTech-VT8251-SATA" }, 65 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA, 66 ahci_ati_sb600_attach, ahci_pci_detach, "ATI-SB600-SATA" }, 67 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 68 ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP65-SATA" }, 69 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1, 70 ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP67-SATA" }, 71 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5, 72 ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP77-SATA" }, 73 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_1, 74 ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP79-SATA" }, 75 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_9, 76 
ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP79-SATA" }, 77 { 0, 0, 78 ahci_pci_attach, ahci_pci_detach, "AHCI-PCI-SATA" } 79 }; 80 81 struct ahci_pciid { 82 uint16_t ahci_vid; 83 uint16_t ahci_did; 84 int ahci_rev; 85 }; 86 87 static const struct ahci_pciid ahci_msi_blacklist[] = { 88 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA, -1 }, 89 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_AHCI, -1 }, 90 91 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121, -1 }, 92 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145, -1 }, 93 94 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa1 }, 95 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa1 }, 96 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa1 }, 97 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa1 }, 98 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa1 }, 99 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa1 }, 100 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa1 }, 101 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa1 }, 102 103 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa2 }, 104 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa2 }, 105 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa2 }, 106 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa2 }, 107 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa2 }, 108 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa2 }, 109 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa2 }, 110 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa2 } 111 }; 112 113 static int ahci_msi_enable = 1; 114 int ahci_synchronous_boot = 1; 115 TUNABLE_INT("hw.ahci.msi.enable", &ahci_msi_enable); 116 TUNABLE_INT("hw.ahci.synchronous_boot", &ahci_synchronous_boot); 117 118 /* 119 * Match during probe and attach. The device does not yet have a softc. 
120 */ 121 const struct ahci_device * 122 ahci_lookup_device(device_t dev) 123 { 124 const struct ahci_device *ad; 125 u_int16_t vendor = pci_get_vendor(dev); 126 u_int16_t product = pci_get_device(dev); 127 u_int8_t class = pci_get_class(dev); 128 u_int8_t subclass = pci_get_subclass(dev); 129 u_int8_t progif = pci_read_config(dev, PCIR_PROGIF, 1); 130 int is_ahci; 131 132 /* 133 * Generally speaking if the pci device does not identify as 134 * AHCI we skip it. 135 */ 136 if (class == PCIC_STORAGE && subclass == PCIS_STORAGE_SATA && 137 progif == PCIP_STORAGE_SATA_AHCI_1_0) { 138 is_ahci = 1; 139 } else { 140 is_ahci = 0; 141 } 142 143 for (ad = &ahci_devices[0]; ad->ad_vendor; ++ad) { 144 if (ad->ad_vendor == vendor && ad->ad_product == product) 145 return (ad); 146 } 147 148 /* 149 * Last ad is the default match if the PCI device matches SATA. 150 */ 151 if (is_ahci == 0) 152 ad = NULL; 153 return (ad); 154 } 155 156 /* 157 * Attach functions. They all eventually fall through to ahci_pci_attach(). 
158 */ 159 static int 160 ahci_vt8251_attach(device_t dev) 161 { 162 struct ahci_softc *sc = device_get_softc(dev); 163 164 sc->sc_flags |= AHCI_F_NO_NCQ; 165 return (ahci_pci_attach(dev)); 166 } 167 168 static int 169 ahci_ati_sb600_attach(device_t dev) 170 { 171 struct ahci_softc *sc = device_get_softc(dev); 172 pcireg_t magic; 173 u_int8_t subclass = pci_get_subclass(dev); 174 u_int8_t revid; 175 176 if (subclass == PCIS_STORAGE_IDE) { 177 revid = pci_read_config(dev, PCIR_REVID, 1); 178 magic = pci_read_config(dev, AHCI_PCI_ATI_SB600_MAGIC, 4); 179 pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC, 180 magic | AHCI_PCI_ATI_SB600_LOCKED, 4); 181 pci_write_config(dev, PCIR_REVID, 182 (PCIC_STORAGE << 24) | 183 (PCIS_STORAGE_SATA << 16) | 184 (PCIP_STORAGE_SATA_AHCI_1_0 << 8) | 185 revid, 4); 186 pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC, magic, 4); 187 } 188 189 sc->sc_flags |= AHCI_F_IGN_FR; 190 return (ahci_pci_attach(dev)); 191 } 192 193 static int 194 ahci_nvidia_mcp_attach(device_t dev) 195 { 196 struct ahci_softc *sc = device_get_softc(dev); 197 198 sc->sc_flags |= AHCI_F_IGN_FR; 199 return (ahci_pci_attach(dev)); 200 } 201 202 static int 203 ahci_pci_attach(device_t dev) 204 { 205 struct ahci_softc *sc = device_get_softc(dev); 206 struct ahci_port *ap; 207 const char *gen; 208 uint16_t vid, did; 209 u_int32_t pi, reg; 210 u_int32_t cap, cap2; 211 u_int32_t chip; 212 u_int irq_flags; 213 bus_addr_t addr; 214 int i, error, msi_enable, rev, fbs; 215 char revbuf[32]; 216 217 if (pci_read_config(dev, PCIR_COMMAND, 2) & 0x0400) { 218 device_printf(dev, "BIOS disabled PCI interrupt, " 219 "re-enabling\n"); 220 pci_write_config(dev, PCIR_COMMAND, 221 pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2); 222 } 223 224 /* 225 * Chip quirks. Sigh. The AHCI spec is not in the least confusing 226 * when it comes to how the FR and CR bits work, but some AHCI 227 * chipsets (aka Marvell) either don't have the bits at all or they 228 * implement them poorly. 
229 */ 230 chip = ((uint16_t)pci_get_device(dev) << 16) | 231 (uint16_t)pci_get_vendor(dev); 232 233 switch(chip) { 234 case 0x91721b4b: 235 device_printf(dev, 236 "Enable 88SE9172 workarounds for broken chip\n"); 237 sc->sc_flags |= AHCI_F_IGN_FR; 238 sc->sc_flags |= AHCI_F_IGN_CR; 239 break; 240 case 0x92151b4b: 241 device_printf(dev, 242 "Enable 88SE9215 workarounds for broken chip\n"); 243 sc->sc_flags |= AHCI_F_IGN_FR; 244 sc->sc_flags |= AHCI_F_IGN_CR; 245 break; 246 case 0x92301b4b: 247 device_printf(dev, 248 "Enable 88SE9230 workarounds for broken chip\n"); 249 sc->sc_flags |= AHCI_F_CYCLE_FR; 250 break; 251 case 0x07f410de: 252 device_printf(dev, 253 "Enable nForce 630i workarounds for broken chip\n"); 254 sc->sc_flags |= AHCI_F_IGN_FR; 255 sc->sc_flags |= AHCI_F_IGN_CR; 256 break; 257 } 258 259 sc->sc_dev = dev; 260 261 /* 262 * Map the AHCI controller's IRQ and BAR(5) (hardware registers) 263 */ 264 msi_enable = ahci_msi_enable; 265 266 vid = pci_get_vendor(dev); 267 did = pci_get_device(dev); 268 rev = pci_get_revid(dev); 269 for (i = 0; i < NELEM(ahci_msi_blacklist); ++i) { 270 const struct ahci_pciid *id = &ahci_msi_blacklist[i]; 271 272 if (vid == id->ahci_vid && did == id->ahci_did) { 273 if (id->ahci_rev < 0 || id->ahci_rev == rev) { 274 msi_enable = 0; 275 break; 276 } 277 } 278 } 279 280 sc->sc_irq_type = pci_alloc_1intr(dev, msi_enable, 281 &sc->sc_rid_irq, &irq_flags); 282 283 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_rid_irq, 284 irq_flags); 285 if (sc->sc_irq == NULL) { 286 device_printf(dev, "unable to map interrupt\n"); 287 ahci_pci_detach(dev); 288 return (ENXIO); 289 } 290 291 /* 292 * When mapping the register window store the tag and handle 293 * separately so we can use the tag with per-port bus handle 294 * sub-spaces. 
295 */ 296 sc->sc_rid_regs = PCIR_BAR(5); 297 sc->sc_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 298 &sc->sc_rid_regs, RF_ACTIVE); 299 if (sc->sc_regs == NULL) { 300 device_printf(dev, "unable to map registers\n"); 301 ahci_pci_detach(dev); 302 return (ENXIO); 303 } 304 sc->sc_iot = rman_get_bustag(sc->sc_regs); 305 sc->sc_ioh = rman_get_bushandle(sc->sc_regs); 306 307 /* 308 * Initialize the chipset and then set the interrupt vector up 309 */ 310 error = ahci_init(sc); 311 if (error) { 312 ahci_pci_detach(dev); 313 return (ENXIO); 314 } 315 316 /* 317 * Get the AHCI capabilities and max number of concurrent 318 * command tags and set up the DMA tags. Adjust the saved 319 * sc_cap according to override flags. 320 */ 321 cap = ahci_read(sc, AHCI_REG_CAP); 322 if (sc->sc_flags & AHCI_F_NO_NCQ) 323 cap &= ~AHCI_REG_CAP_SNCQ; 324 if (sc->sc_flags & AHCI_F_FORCE_FBSS) 325 cap |= AHCI_REG_CAP_FBSS; 326 if (sc->sc_flags & AHCI_F_FORCE_SCLO) 327 cap |= AHCI_REG_CAP_SCLO; 328 sc->sc_cap = cap; 329 330 /* 331 * We assume at least 4 commands. 332 */ 333 sc->sc_ncmds = AHCI_REG_CAP_NCS(cap); 334 if (sc->sc_ncmds < 4) { 335 device_printf(dev, "NCS must probe a value >= 4\n"); 336 ahci_pci_detach(dev); 337 return (ENXIO); 338 } 339 340 addr = (cap & AHCI_REG_CAP_S64A) ? 341 BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT; 342 343 /* 344 * DMA tags for allocation of DMA memory buffers, lists, and so 345 * forth. These are typically per-port. 346 * 347 * When FIS-based switching is supported we need a rfis for 348 * each target (4K total). The spec also requires 4K alignment 349 * for this case. 350 */ 351 fbs = (cap & AHCI_REG_CAP_FBSS) ? 16 : 1; 352 error = 0; 353 354 sc->sc_rfis_size = sizeof(struct ahci_rfis) * fbs; 355 356 error += bus_dma_tag_create( 357 NULL, /* parent tag */ 358 sc->sc_rfis_size, /* alignment */ 359 PAGE_SIZE, /* boundary */ 360 addr, /* loaddr? 
*/ 361 BUS_SPACE_MAXADDR, /* hiaddr */ 362 NULL, /* filter */ 363 NULL, /* filterarg */ 364 sc->sc_rfis_size, /* [max]size */ 365 1, /* maxsegs */ 366 sc->sc_rfis_size, /* maxsegsz */ 367 0, /* flags */ 368 &sc->sc_tag_rfis); /* return tag */ 369 370 sc->sc_cmdlist_size = sc->sc_ncmds * sizeof(struct ahci_cmd_hdr); 371 372 error += bus_dma_tag_create( 373 NULL, /* parent tag */ 374 32, /* alignment */ 375 4096 * 1024, /* boundary */ 376 addr, /* loaddr? */ 377 BUS_SPACE_MAXADDR, /* hiaddr */ 378 NULL, /* filter */ 379 NULL, /* filterarg */ 380 sc->sc_cmdlist_size, 381 1, /* maxsegs */ 382 sc->sc_cmdlist_size, 383 0, /* flags */ 384 &sc->sc_tag_cmdh); /* return tag */ 385 386 /* 387 * NOTE: ahci_cmd_table is sized to a power of 2 388 */ 389 error += bus_dma_tag_create( 390 NULL, /* parent tag */ 391 sizeof(struct ahci_cmd_table), /* alignment */ 392 4096 * 1024, /* boundary */ 393 addr, /* loaddr? */ 394 BUS_SPACE_MAXADDR, /* hiaddr */ 395 NULL, /* filter */ 396 NULL, /* filterarg */ 397 sc->sc_ncmds * sizeof(struct ahci_cmd_table), 398 1, /* maxsegs */ 399 sc->sc_ncmds * sizeof(struct ahci_cmd_table), 400 0, /* flags */ 401 &sc->sc_tag_cmdt); /* return tag */ 402 403 /* 404 * The data tag is used for later dmamaps and not immediately 405 * allocated. 406 */ 407 error += bus_dma_tag_create( 408 NULL, /* parent tag */ 409 4, /* alignment */ 410 0, /* boundary */ 411 addr, /* loaddr? 
*/ 412 BUS_SPACE_MAXADDR, /* hiaddr */ 413 NULL, /* filter */ 414 NULL, /* filterarg */ 415 4096 * 1024, /* maxiosize */ 416 AHCI_MAX_PRDT, /* maxsegs */ 417 65536, /* maxsegsz */ 418 0, /* flags */ 419 &sc->sc_tag_data); /* return tag */ 420 421 if (error) { 422 device_printf(dev, "unable to create dma tags\n"); 423 ahci_pci_detach(dev); 424 return (ENXIO); 425 } 426 427 switch (cap & AHCI_REG_CAP_ISS) { 428 case AHCI_REG_CAP_ISS_G1: 429 gen = "1 (1.5Gbps)"; 430 break; 431 case AHCI_REG_CAP_ISS_G2: 432 gen = "2 (3Gbps)"; 433 break; 434 case AHCI_REG_CAP_ISS_G3: 435 gen = "3 (6Gbps)"; 436 break; 437 default: 438 gen = "unknown"; 439 break; 440 } 441 442 /* check the revision */ 443 reg = ahci_read(sc, AHCI_REG_VS); 444 445 if (reg & 0x0000FF) { 446 ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d.%d", 447 (reg >> 16), (uint8_t)(reg >> 8), (uint8_t)reg); 448 } else { 449 ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d", 450 (reg >> 16), (uint8_t)(reg >> 8)); 451 } 452 sc->sc_vers = reg; 453 454 if (reg >= AHCI_REG_VS_1_3) { 455 cap2 = ahci_read(sc, AHCI_REG_CAP2); 456 device_printf(dev, 457 "%s cap 0x%pb%i cap2 0x%pb%i, %d ports, " 458 "%d tags/port, gen %s\n", 459 revbuf, 460 AHCI_FMT_CAP, cap, 461 AHCI_FMT_CAP2, cap2, 462 AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen); 463 } else { 464 cap2 = 0; 465 device_printf(dev, 466 "%s cap 0x%pb%i, %d ports, " 467 "%d tags/port, gen %s\n", 468 revbuf, 469 AHCI_FMT_CAP, cap, 470 AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen); 471 } 472 sc->sc_cap2 = cap2; 473 474 pi = ahci_read(sc, AHCI_REG_PI); 475 DPRINTF(AHCI_D_VERBOSE, "%s: ports implemented: 0x%08x\n", 476 DEVNAME(sc), pi); 477 478 sc->sc_ipm_disable = AHCI_PREG_SCTL_IPM_NOPARTIAL | 479 AHCI_PREG_SCTL_IPM_NOSLUMBER; 480 if (sc->sc_cap2 & AHCI_REG_CAP2_SDS) 481 sc->sc_ipm_disable |= AHCI_PREG_SCTL_IPM_NODEVSLP; 482 483 #ifdef AHCI_COALESCE 484 /* Naive coalescing support - enable for all ports. 
*/ 485 if (cap & AHCI_REG_CAP_CCCS) { 486 u_int16_t ccc_timeout = 20; 487 u_int8_t ccc_numcomplete = 12; 488 u_int32_t ccc_ctl; 489 490 /* disable coalescing during reconfiguration. */ 491 ccc_ctl = ahci_read(sc, AHCI_REG_CCC_CTL); 492 ccc_ctl &= ~0x00000001; 493 ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl); 494 495 sc->sc_ccc_mask = 1 << AHCI_REG_CCC_CTL_INT(ccc_ctl); 496 if (pi & sc->sc_ccc_mask) { 497 /* A conflict with the implemented port list? */ 498 printf("%s: coalescing interrupt/implemented port list " 499 "conflict, PI: %08x, ccc_mask: %08x\n", 500 DEVNAME(sc), pi, sc->sc_ccc_mask); 501 sc->sc_ccc_mask = 0; 502 goto noccc; 503 } 504 505 /* ahci_port_start will enable each port when it starts. */ 506 sc->sc_ccc_ports = pi; 507 sc->sc_ccc_ports_cur = 0; 508 509 /* program thresholds and enable overall coalescing. */ 510 ccc_ctl &= ~0xffffff00; 511 ccc_ctl |= (ccc_timeout << 16) | (ccc_numcomplete << 8); 512 ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl); 513 ahci_write(sc, AHCI_REG_CCC_PORTS, 0); 514 ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl | 1); 515 } 516 noccc: 517 #endif 518 /* 519 * Allocate per-port resources 520 * 521 * Ignore attach errors, leave the port intact for 522 * rescan and continue the loop. 523 * 524 * All ports are attached in parallel but the CAM scan-bus 525 * is held up until all ports are attached so we get a deterministic 526 * order. 527 */ 528 for (i = 0; error == 0 && i < AHCI_MAX_PORTS; i++) { 529 if ((pi & (1 << i)) == 0) { 530 /* dont allocate stuff if the port isnt implemented */ 531 continue; 532 } 533 error = ahci_port_alloc(sc, i); 534 } 535 536 /* 537 * Setup the interrupt vector and enable interrupts. Note that 538 * since the irq may be shared we do not set it up until we are 539 * ready to go. 
540 */ 541 if (error == 0) { 542 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE | 543 INTR_HIFREQ, 544 ahci_intr, sc, 545 &sc->sc_irq_handle, NULL); 546 } 547 548 if (error) { 549 device_printf(dev, "unable to install interrupt\n"); 550 ahci_pci_detach(dev); 551 return (ENXIO); 552 } 553 554 /* 555 * Before marking the sc as good, which allows the interrupt 556 * subsystem to operate on the ports, wait for all the port threads 557 * to get past their initial pre-probe init. Otherwise an interrupt 558 * may try to process the port before it has been initialized. 559 */ 560 for (i = 0; i < AHCI_MAX_PORTS; i++) { 561 if ((ap = sc->sc_ports[i]) != NULL) { 562 while (ap->ap_signal & AP_SIGF_THREAD_SYNC) 563 tsleep(&ap->ap_signal, 0, "ahprb1", hz); 564 } 565 } 566 567 /* 568 * Master interrupt enable, and call ahci_intr() in case we race 569 * our AHCI_F_INT_GOOD flag. 570 */ 571 crit_enter(); 572 ahci_write(sc, AHCI_REG_GHC, AHCI_REG_GHC_AE | AHCI_REG_GHC_IE); 573 sc->sc_flags |= AHCI_F_INT_GOOD; 574 crit_exit(); 575 ahci_intr(sc); 576 577 /* 578 * Synchronously wait for some of the AHCI devices to initialize. 579 * 580 * All ports are probing in parallel. Wait for them to finish 581 * and then issue the cam attachment and bus scan serially so 582 * the 'da' assignments are deterministic. 
583 */ 584 for (i = 0; i < AHCI_MAX_PORTS && ahci_synchronous_boot; i++) { 585 if ((ap = sc->sc_ports[i]) != NULL) { 586 while (ap->ap_signal & AP_SIGF_INIT) 587 tsleep(&ap->ap_signal, 0, "ahprb2", hz); 588 ahci_os_lock_port(ap); 589 if (ahci_cam_attach(ap) == 0) { 590 ahci_cam_changed(ap, NULL, -1); 591 ahci_os_unlock_port(ap); 592 while ((ap->ap_flags & AP_F_SCAN_COMPLETED) == 0) { 593 tsleep(&ap->ap_flags, 0, "ahprb3", hz); 594 } 595 } else { 596 ahci_os_unlock_port(ap); 597 } 598 } 599 } 600 601 return(0); 602 } 603 604 /* 605 * Device unload / detachment 606 */ 607 static int 608 ahci_pci_detach(device_t dev) 609 { 610 struct ahci_softc *sc = device_get_softc(dev); 611 struct ahci_port *ap; 612 int i; 613 614 /* 615 * Disable the controller and de-register the interrupt, if any. 616 * 617 * XXX interlock last interrupt? 618 */ 619 sc->sc_flags &= ~AHCI_F_INT_GOOD; 620 if (sc->sc_regs) 621 ahci_write(sc, AHCI_REG_GHC, 0); 622 623 if (sc->sc_irq_handle) { 624 bus_teardown_intr(dev, sc->sc_irq, sc->sc_irq_handle); 625 sc->sc_irq_handle = NULL; 626 } 627 628 /* 629 * Free port structures and DMA memory 630 */ 631 for (i = 0; i < AHCI_MAX_PORTS; i++) { 632 ap = sc->sc_ports[i]; 633 if (ap) { 634 ahci_cam_detach(ap); 635 ahci_port_free(sc, i); 636 } 637 } 638 639 /* 640 * Clean up the bus space 641 */ 642 if (sc->sc_irq) { 643 bus_release_resource(dev, SYS_RES_IRQ, 644 sc->sc_rid_irq, sc->sc_irq); 645 sc->sc_irq = NULL; 646 } 647 648 if (sc->sc_irq_type == PCI_INTR_TYPE_MSI) 649 pci_release_msi(dev); 650 651 if (sc->sc_regs) { 652 bus_release_resource(dev, SYS_RES_MEMORY, 653 sc->sc_rid_regs, sc->sc_regs); 654 sc->sc_regs = NULL; 655 } 656 657 if (sc->sc_tag_rfis) { 658 bus_dma_tag_destroy(sc->sc_tag_rfis); 659 sc->sc_tag_rfis = NULL; 660 } 661 if (sc->sc_tag_cmdh) { 662 bus_dma_tag_destroy(sc->sc_tag_cmdh); 663 sc->sc_tag_cmdh = NULL; 664 } 665 if (sc->sc_tag_cmdt) { 666 bus_dma_tag_destroy(sc->sc_tag_cmdt); 667 sc->sc_tag_cmdt = NULL; 668 } 669 if 
(sc->sc_tag_data) { 670 bus_dma_tag_destroy(sc->sc_tag_data); 671 sc->sc_tag_data = NULL; 672 } 673 674 return (0); 675 } 676