1 /*
2 * (MPSAFE)
3 *
4 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 *
19 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
20 *
21 * This code is derived from software contributed to The DragonFly Project
22 * by Matthew Dillon <dillon@backplane.com>
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * 3. Neither the name of The DragonFly Project nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific, prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
41 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
42 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
43 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
44 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
45 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
46 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
47 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
48 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49 * SUCH DAMAGE.
50 *
51 * $OpenBSD: ahci.c,v 1.147 2009/02/16 21:19:07 miod Exp $
52 */
53
54 #include "ahci.h"
55
56 static int ahci_vt8251_attach(device_t);
57 static int ahci_ati_sb600_attach(device_t);
58 static int ahci_nvidia_mcp_attach(device_t);
59 static int ahci_pci_attach(device_t);
60 static int ahci_pci_detach(device_t);
61
/*
 * Table of AHCI controllers that need chip-specific attach handling.
 * The zero-vendor terminator entry doubles as the generic match that
 * ahci_lookup_device() returns for any other controller advertising
 * the standard AHCI PCI class codes.
 */
static const struct ahci_device ahci_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA,
	    ahci_vt8251_attach, ahci_pci_detach, "ViaTech-VT8251-SATA" },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA,
	    ahci_ati_sb600_attach, ahci_pci_detach, "ATI-SB600-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP65-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP67-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP77-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_1,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP79-SATA" },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_9,
	    ahci_nvidia_mcp_attach, ahci_pci_detach, "NVidia-MCP79-SATA" },
	/* Terminator / generic AHCI match (vendor 0 stops table scans) */
	{ 0, 0,
	    ahci_pci_attach, ahci_pci_detach, "AHCI-PCI-SATA" }
};
80
/*
 * Minimal PCI id tuple used for quirk tables (see ahci_msi_blacklist).
 */
struct ahci_pciid {
	uint16_t	ahci_vid;	/* PCI vendor id */
	uint16_t	ahci_did;	/* PCI device id */
	int		ahci_rev;	/* PCI revision id, or -1 for any */
};
86
/*
 * Controllers on which MSI is known to be broken; for these the attach
 * code falls back to legacy INTx regardless of the hw.ahci.msi.enable
 * tunable.  An ahci_rev of -1 matches every revision, otherwise only
 * the listed revision is blacklisted.
 */
static const struct ahci_pciid ahci_msi_blacklist[] = {
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA, -1 },
	{ PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_AHCI, -1 },

	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121, -1 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145, -1 },

	/* NVIDIA MCP65 revisions 0xa1 and 0xa2 only */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa1 },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_5, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_6, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_7, 0xa2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_8, 0xa2 }
};
112
/* Use MSI interrupts when available (tunable: hw.ahci.msi.enable) */
static int ahci_msi_enable = 1;
/*
 * When non-zero the attach waits for the initial port probes and CAM
 * bus scans to complete so boot-time device numbering is deterministic
 * (tunable: hw.ahci.synchronous_boot).
 */
int ahci_synchronous_boot = 1;
TUNABLE_INT("hw.ahci.msi.enable", &ahci_msi_enable);
TUNABLE_INT("hw.ahci.synchronous_boot", &ahci_synchronous_boot);
117
118 /*
119 * Match during probe and attach. The device does not yet have a softc.
120 */
121 const struct ahci_device *
ahci_lookup_device(device_t dev)122 ahci_lookup_device(device_t dev)
123 {
124 const struct ahci_device *ad;
125 u_int16_t vendor = pci_get_vendor(dev);
126 u_int16_t product = pci_get_device(dev);
127 u_int8_t class = pci_get_class(dev);
128 u_int8_t subclass = pci_get_subclass(dev);
129 u_int8_t progif = pci_read_config(dev, PCIR_PROGIF, 1);
130 int is_ahci;
131
132 /*
133 * Generally speaking if the pci device does not identify as
134 * AHCI we skip it.
135 */
136 if (class == PCIC_STORAGE && subclass == PCIS_STORAGE_SATA &&
137 progif == PCIP_STORAGE_SATA_AHCI_1_0) {
138 is_ahci = 1;
139 } else {
140 is_ahci = 0;
141 }
142
143 for (ad = &ahci_devices[0]; ad->ad_vendor; ++ad) {
144 if (ad->ad_vendor == vendor && ad->ad_product == product)
145 return (ad);
146 }
147
148 /*
149 * Last ad is the default match if the PCI device matches SATA.
150 */
151 if (is_ahci == 0)
152 ad = NULL;
153 return (ad);
154 }
155
156 /*
157 * Attach functions. They all eventually fall through to ahci_pci_attach().
158 */
159 static int
ahci_vt8251_attach(device_t dev)160 ahci_vt8251_attach(device_t dev)
161 {
162 struct ahci_softc *sc = device_get_softc(dev);
163
164 sc->sc_flags |= AHCI_F_NO_NCQ;
165 return (ahci_pci_attach(dev));
166 }
167
/*
 * ATI SB600: if the BIOS left the controller in IDE mode (storage/IDE
 * subclass) switch it to AHCI by rewriting the PCI class code register.
 * The class code register is normally locked; the "magic" config-space
 * write temporarily unlocks it, and the original magic value is
 * restored afterwards.  The chip also needs the ignore-FR quirk.
 */
static int
ahci_ati_sb600_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	pcireg_t magic;
	u_int8_t subclass = pci_get_subclass(dev);
	u_int8_t revid;

	if (subclass == PCIS_STORAGE_IDE) {
		revid = pci_read_config(dev, PCIR_REVID, 1);
		/* Unlock the class code register */
		magic = pci_read_config(dev, AHCI_PCI_ATI_SB600_MAGIC, 4);
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC,
				 magic | AHCI_PCI_ATI_SB600_LOCKED, 4);
		/*
		 * Rewrite class/subclass/progif to SATA-AHCI, keeping
		 * the original revision id in the low byte.
		 */
		pci_write_config(dev, PCIR_REVID,
				 (PCIC_STORAGE << 24) |
				 (PCIS_STORAGE_SATA << 16) |
				 (PCIP_STORAGE_SATA_AHCI_1_0 << 8) |
				 revid, 4);
		/* Restore the magic register, re-locking the class code */
		pci_write_config(dev, AHCI_PCI_ATI_SB600_MAGIC, magic, 4);
	}

	sc->sc_flags |= AHCI_F_IGN_FR;
	return (ahci_pci_attach(dev));
}
192
193 static int
ahci_nvidia_mcp_attach(device_t dev)194 ahci_nvidia_mcp_attach(device_t dev)
195 {
196 struct ahci_softc *sc = device_get_softc(dev);
197
198 sc->sc_flags |= AHCI_F_IGN_FR;
199 return (ahci_pci_attach(dev));
200 }
201
/*
 * Generic PCI attach.  The chip-specific attach functions all fall
 * through to here after setting quirk flags in the softc.
 *
 * Maps the interrupt and the AHCI register window (BAR 5), initializes
 * the chipset, creates the per-controller DMA tags, allocates the
 * per-port structures, installs the interrupt handler, and (when
 * ahci_synchronous_boot is set) waits for the initial port probes and
 * CAM scans so that boot-time unit numbering is deterministic.
 *
 * Returns 0 on success or ENXIO on failure; every failure path cleans
 * up via ahci_pci_detach().
 */
static int
ahci_pci_attach(device_t dev)
{
	struct ahci_softc *sc = device_get_softc(dev);
	struct ahci_port *ap;
	const char *gen;
	uint16_t vid, did;
	u_int32_t pi, reg;
	u_int32_t cap, cap2;
	u_int32_t chip;
	u_int irq_flags;
	bus_addr_t addr;
	int i, error, msi_enable, rev, fbs;
	char revbuf[32];

	/*
	 * Some BIOSes hand the device over with the interrupt-disable
	 * bit (0x0400) set in the PCI command register; clear it or we
	 * will never receive an interrupt.
	 */
	if (pci_read_config(dev, PCIR_COMMAND, 2) & 0x0400) {
		device_printf(dev, "BIOS disabled PCI interrupt, "
			      "re-enabling\n");
		pci_write_config(dev, PCIR_COMMAND,
			pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
	}

	/*
	 * Chip quirks.  Sigh.  The AHCI spec is not in the least confusing
	 * when it comes to how the FR and CR bits work, but some AHCI
	 * chipsets (aka Marvell) either don't have the bits at all or they
	 * implement them poorly.
	 *
	 * chip is (device_id << 16) | vendor_id, so e.g. 0x91721b4b is
	 * vendor 0x1b4b (Marvell) device 0x9172.
	 */
	chip = ((uint16_t)pci_get_device(dev) << 16) |
	       (uint16_t)pci_get_vendor(dev);

	switch(chip) {
	case 0x91721b4b:
		device_printf(dev,
			      "Enable 88SE9172 workarounds for broken chip\n");
		sc->sc_flags |= AHCI_F_IGN_FR;
		sc->sc_flags |= AHCI_F_IGN_CR;
		break;
	case 0x92151b4b:
		device_printf(dev,
			      "Enable 88SE9215 workarounds for broken chip\n");
		sc->sc_flags |= AHCI_F_IGN_FR;
		sc->sc_flags |= AHCI_F_IGN_CR;
		break;
	case 0x92301b4b:
		device_printf(dev,
			      "Enable 88SE9230 workarounds for broken chip\n");
		sc->sc_flags |= AHCI_F_CYCLE_FR;
		break;
	case 0x07f410de:
		device_printf(dev,
			      "Enable nForce 630i workarounds for broken chip\n");
		sc->sc_flags |= AHCI_F_IGN_FR;
		sc->sc_flags |= AHCI_F_IGN_CR;
		break;
	}

	sc->sc_dev = dev;

	/*
	 * Map the AHCI controller's IRQ and BAR(5) (hardware registers)
	 *
	 * Start from the global tunable, then force MSI off for any
	 * controller on the blacklist (optionally keyed on revision).
	 */
	msi_enable = ahci_msi_enable;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	rev = pci_get_revid(dev);
	for (i = 0; i < NELEM(ahci_msi_blacklist); ++i) {
		const struct ahci_pciid *id = &ahci_msi_blacklist[i];

		if (vid == id->ahci_vid && did == id->ahci_did) {
			if (id->ahci_rev < 0 || id->ahci_rev == rev) {
				msi_enable = 0;
				break;
			}
		}
	}

	sc->sc_irq_type = pci_alloc_1intr(dev, msi_enable,
					  &sc->sc_rid_irq, &irq_flags);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_rid_irq,
					    irq_flags);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "unable to map interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * When mapping the register window store the tag and handle
	 * separately so we can use the tag with per-port bus handle
	 * sub-spaces.
	 */
	sc->sc_rid_regs = PCIR_BAR(5);
	sc->sc_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->sc_rid_regs, RF_ACTIVE);
	if (sc->sc_regs == NULL) {
		device_printf(dev, "unable to map registers\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}
	sc->sc_iot = rman_get_bustag(sc->sc_regs);
	sc->sc_ioh = rman_get_bushandle(sc->sc_regs);

	/*
	 * Initialize the chipset and then set the interrupt vector up
	 */
	error = ahci_init(sc);
	if (error) {
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Get the AHCI capabilities and max number of concurrent
	 * command tags and set up the DMA tags.  Adjust the saved
	 * sc_cap according to override flags.
	 */
	cap = ahci_read(sc, AHCI_REG_CAP);
	if (sc->sc_flags & AHCI_F_NO_NCQ)
		cap &= ~AHCI_REG_CAP_SNCQ;
	if (sc->sc_flags & AHCI_F_FORCE_FBSS)
		cap |= AHCI_REG_CAP_FBSS;
	if (sc->sc_flags & AHCI_F_FORCE_SCLO)
		cap |= AHCI_REG_CAP_SCLO;
	sc->sc_cap = cap;

	/*
	 * We assume at least 4 commands.
	 */
	sc->sc_ncmds = AHCI_REG_CAP_NCS(cap);
	if (sc->sc_ncmds < 4) {
		device_printf(dev, "NCS must probe a value >= 4\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* Restrict DMA to 32 bits unless the chip supports 64-bit (S64A) */
	addr = (cap & AHCI_REG_CAP_S64A) ?
		BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT;

	/*
	 * DMA tags for allocation of DMA memory buffers, lists, and so
	 * forth.  These are typically per-port.
	 *
	 * When FIS-based switching is supported we need a rfis for
	 * each target (4K total).  The spec also requires 4K alignment
	 * for this case.
	 */
	fbs = (cap & AHCI_REG_CAP_FBSS) ? 16 : 1;
	error = 0;

	sc->sc_rfis_size = sizeof(struct ahci_rfis) * fbs;

	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			sc->sc_rfis_size,		/* alignment */
			PAGE_SIZE,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			sc->sc_rfis_size,		/* [max]size */
			1,				/* maxsegs */
			sc->sc_rfis_size,		/* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_rfis);		/* return tag */

	sc->sc_cmdlist_size = sc->sc_ncmds * sizeof(struct ahci_cmd_hdr);

	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			32,				/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			sc->sc_cmdlist_size,
			1,				/* maxsegs */
			sc->sc_cmdlist_size,
			0,				/* flags */
			&sc->sc_tag_cmdh);		/* return tag */

	/*
	 * NOTE: ahci_cmd_table is sized to a power of 2
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			sizeof(struct ahci_cmd_table),	/* alignment */
			4096 * 1024,			/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			1,				/* maxsegs */
			sc->sc_ncmds * sizeof(struct ahci_cmd_table),
			0,				/* flags */
			&sc->sc_tag_cmdt);		/* return tag */

	/*
	 * The data tag is used for later dmamaps and not immediately
	 * allocated.
	 */
	error += bus_dma_tag_create(
			NULL,				/* parent tag */
			4,				/* alignment */
			0,				/* boundary */
			addr,				/* loaddr? */
			BUS_SPACE_MAXADDR,		/* hiaddr */
			4096 * 1024,			/* maxiosize */
			AHCI_MAX_PRDT,			/* maxsegs */
			65536,				/* maxsegsz */
			0,				/* flags */
			&sc->sc_tag_data);		/* return tag */

	if (error) {
		device_printf(dev, "unable to create dma tags\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/* Decode the maximum interface speed supported by the controller */
	switch (cap & AHCI_REG_CAP_ISS) {
	case AHCI_REG_CAP_ISS_G1:
		gen = "1 (1.5Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G2:
		gen = "2 (3Gbps)";
		break;
	case AHCI_REG_CAP_ISS_G3:
		gen = "3 (6Gbps)";
		break;
	default:
		gen = "unknown";
		break;
	}

	/*
	 * Check the revision.  VS layout used here: major in the high
	 * 16 bits, minor in bits 15:8, sub-minor in the low byte (the
	 * sub-minor field may be zero and is then omitted).
	 */
	reg = ahci_read(sc, AHCI_REG_VS);

	if (reg & 0x0000FF) {
		ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d.%d",
			  (reg >> 16), (uint8_t)(reg >> 8), (uint8_t)reg);
	} else {
		ksnprintf(revbuf, sizeof(revbuf), "AHCI %d.%d",
			  (reg >> 16), (uint8_t)(reg >> 8));
	}
	sc->sc_vers = reg;

	/* CAP2 only exists on AHCI 1.3 and later */
	if (reg >= AHCI_REG_VS_1_3) {
		cap2 = ahci_read(sc, AHCI_REG_CAP2);
		device_printf(dev,
			      "%s cap 0x%pb%i cap2 0x%pb%i, %d ports, "
			      "%d tags/port, gen %s\n",
			      revbuf,
			      AHCI_FMT_CAP, cap,
			      AHCI_FMT_CAP2, cap2,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	} else {
		cap2 = 0;
		device_printf(dev,
			      "%s cap 0x%pb%i, %d ports, "
			      "%d tags/port, gen %s\n",
			      revbuf,
			      AHCI_FMT_CAP, cap,
			      AHCI_REG_CAP_NP(cap), sc->sc_ncmds, gen);
	}
	sc->sc_cap2 = cap2;

	/* Bitmap of implemented ports */
	pi = ahci_read(sc, AHCI_REG_PI);
	DPRINTF(AHCI_D_VERBOSE, "%s: ports implemented: 0x%08x\n",
		DEVNAME(sc), pi);

	/*
	 * Interface power-management states to disable on the ports.
	 * DevSleep is only disabled when the controller reports support
	 * for it (CAP2.SDS).
	 */
	sc->sc_ipm_disable = AHCI_PREG_SCTL_IPM_NOPARTIAL |
			     AHCI_PREG_SCTL_IPM_NOSLUMBER;
	if (sc->sc_cap2 & AHCI_REG_CAP2_SDS)
		sc->sc_ipm_disable |= AHCI_PREG_SCTL_IPM_NODEVSLP;

#ifdef AHCI_COALESCE
	/* Naive coalescing support - enable for all ports. */
	if (cap & AHCI_REG_CAP_CCCS) {
		u_int16_t ccc_timeout = 20;
		u_int8_t ccc_numcomplete = 12;
		u_int32_t ccc_ctl;

		/* disable coalescing during reconfiguration. */
		ccc_ctl = ahci_read(sc, AHCI_REG_CCC_CTL);
		ccc_ctl &= ~0x00000001;
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);

		sc->sc_ccc_mask = 1 << AHCI_REG_CCC_CTL_INT(ccc_ctl);
		if (pi & sc->sc_ccc_mask) {
			/* A conflict with the implemented port list? */
			printf("%s: coalescing interrupt/implemented port list "
			    "conflict, PI: %08x, ccc_mask: %08x\n",
			    DEVNAME(sc), pi, sc->sc_ccc_mask);
			sc->sc_ccc_mask = 0;
			goto noccc;
		}

		/* ahci_port_start will enable each port when it starts. */
		sc->sc_ccc_ports = pi;
		sc->sc_ccc_ports_cur = 0;

		/* program thresholds and enable overall coalescing. */
		ccc_ctl &= ~0xffffff00;
		ccc_ctl |= (ccc_timeout << 16) | (ccc_numcomplete << 8);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl);
		ahci_write(sc, AHCI_REG_CCC_PORTS, 0);
		ahci_write(sc, AHCI_REG_CCC_CTL, ccc_ctl | 1);
	}
noccc:
#endif
	/*
	 * Allocate per-port resources for each implemented port.
	 *
	 * NOTE: despite the intent of leaving failed ports intact for
	 * rescan, an allocation error terminates this loop via the
	 * (error == 0) test and aborts the attach below.
	 *
	 * All ports are attached in parallel but the CAM scan-bus
	 * is held up until all ports are attached so we get a deterministic
	 * order.
	 */
	for (i = 0; error == 0 && i < AHCI_MAX_PORTS; i++) {
		if ((pi & (1 << i)) == 0) {
			/* dont allocate stuff if the port isnt implemented */
			continue;
		}
		error = ahci_port_alloc(sc, i);
	}

	/*
	 * Setup the interrupt vector and enable interrupts.  Note that
	 * since the irq may be shared we do not set it up until we are
	 * ready to go.
	 */
	if (error == 0) {
		error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE |
				       INTR_HIFREQ,
				       ahci_intr, sc,
				       &sc->sc_irq_handle, NULL);
	}

	if (error) {
		device_printf(dev, "unable to install interrupt\n");
		ahci_pci_detach(dev);
		return (ENXIO);
	}

	/*
	 * Before marking the sc as good, which allows the interrupt
	 * subsystem to operate on the ports, wait for all the port threads
	 * to get past their initial pre-probe init.  Otherwise an interrupt
	 * may try to process the port before it has been initialized.
	 */
	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			while (ap->ap_signal & AP_SIGF_THREAD_SYNC)
				tsleep(&ap->ap_signal, 0, "ahprb1", hz);
		}
	}

	/*
	 * Master interrupt enable, and call ahci_intr() in case we race
	 * our AHCI_F_INT_GOOD flag.
	 */
	crit_enter();
	ahci_write(sc, AHCI_REG_GHC, AHCI_REG_GHC_AE | AHCI_REG_GHC_IE);
	sc->sc_flags |= AHCI_F_INT_GOOD;
	crit_exit();
	ahci_intr(sc);

	/*
	 * Synchronously wait for some of the AHCI devices to initialize.
	 *
	 * All ports are probing in parallel.  Wait for them to finish
	 * and then issue the cam attachment and bus scan serially so
	 * the 'da' assignments are deterministic.
	 */
	for (i = 0; i < AHCI_MAX_PORTS && ahci_synchronous_boot; i++) {
		if ((ap = sc->sc_ports[i]) != NULL) {
			while (ap->ap_signal & AP_SIGF_INIT)
				tsleep(&ap->ap_signal, 0, "ahprb2", hz);
			ahci_os_lock_port(ap);
			if (ahci_cam_attach(ap) == 0) {
				ahci_cam_changed(ap, NULL, -1);
				ahci_os_unlock_port(ap);
				while ((ap->ap_flags & AP_F_SCAN_COMPLETED) == 0) {
					tsleep(&ap->ap_flags, 0, "ahprb3", hz);
				}
			} else {
				ahci_os_unlock_port(ap);
			}
		}
	}

	return(0);
}
595
596 /*
597 * Device unload / detachment
598 */
599 static int
ahci_pci_detach(device_t dev)600 ahci_pci_detach(device_t dev)
601 {
602 struct ahci_softc *sc = device_get_softc(dev);
603 struct ahci_port *ap;
604 int i;
605
606 /*
607 * Disable the controller and de-register the interrupt, if any.
608 *
609 * XXX interlock last interrupt?
610 */
611 sc->sc_flags &= ~AHCI_F_INT_GOOD;
612 if (sc->sc_regs)
613 ahci_write(sc, AHCI_REG_GHC, 0);
614
615 if (sc->sc_irq_handle) {
616 bus_teardown_intr(dev, sc->sc_irq, sc->sc_irq_handle);
617 sc->sc_irq_handle = NULL;
618 }
619
620 /*
621 * Free port structures and DMA memory
622 */
623 for (i = 0; i < AHCI_MAX_PORTS; i++) {
624 ap = sc->sc_ports[i];
625 if (ap) {
626 ahci_cam_detach(ap);
627 ahci_port_free(sc, i);
628 }
629 }
630
631 /*
632 * Clean up the bus space
633 */
634 if (sc->sc_irq) {
635 bus_release_resource(dev, SYS_RES_IRQ,
636 sc->sc_rid_irq, sc->sc_irq);
637 sc->sc_irq = NULL;
638 }
639
640 if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
641 pci_release_msi(dev);
642
643 if (sc->sc_regs) {
644 bus_release_resource(dev, SYS_RES_MEMORY,
645 sc->sc_rid_regs, sc->sc_regs);
646 sc->sc_regs = NULL;
647 }
648
649 if (sc->sc_tag_rfis) {
650 bus_dma_tag_destroy(sc->sc_tag_rfis);
651 sc->sc_tag_rfis = NULL;
652 }
653 if (sc->sc_tag_cmdh) {
654 bus_dma_tag_destroy(sc->sc_tag_cmdh);
655 sc->sc_tag_cmdh = NULL;
656 }
657 if (sc->sc_tag_cmdt) {
658 bus_dma_tag_destroy(sc->sc_tag_cmdt);
659 sc->sc_tag_cmdt = NULL;
660 }
661 if (sc->sc_tag_data) {
662 bus_dma_tag_destroy(sc->sc_tag_data);
663 sc->sc_tag_data = NULL;
664 }
665
666 return (0);
667 }
668