/*	$OpenBSD: pci.c,v 1.129 2024/08/10 20:20:50 kettenis Exp $	*/
/*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996 Christopher G. Demetriou. All rights reserved.
 * Copyright (c) 1994 Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/ppbreg.h>

int	pcimatch(struct device *, void *, void *);
void	pciattach(struct device *, struct device *, void *);
int	pcidetach(struct device *, int);
int	pciactivate(struct device *, int);
void	pci_suspend(struct pci_softc *);
void	pci_powerdown(struct pci_softc *);
void	pci_resume(struct pci_softc *);

struct msix_vector {
	uint32_t mv_ma;
	uint32_t mv_mau32;
	uint32_t mv_md;
	uint32_t mv_vc;
};

#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
			    sizeof(pcireg_t))
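/*
 * NMAPREG is the number of 32-bit base address registers in a type 0
 * header: (PCI_MAPREG_END - PCI_MAPREG_START) / 4 = (0x28 - 0x10) / 4 = 6.
 */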
struct pci_dev {
	struct device *pd_dev;
	LIST_ENTRY(pci_dev) pd_next;
	pcitag_t pd_tag;	/* pci register tag */
	pcireg_t pd_csr;
	pcireg_t pd_bhlc;
	pcireg_t pd_int;
	pcireg_t pd_map[NMAPREG];
	pcireg_t pd_mask[NMAPREG];
	pcireg_t pd_msi_mc;
	pcireg_t pd_msi_ma;
	pcireg_t pd_msi_mau32;
	pcireg_t pd_msi_md;
	pcireg_t pd_msix_mc;
	struct msix_vector *pd_msix_table;
	int pd_pmcsr_state;
	int pd_vga_decode;
};

#ifdef APERTURE
extern int allowaperture;
#endif

const struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};

int	pci_ndomains;

struct proc *pci_vga_proc;
struct pci_softc *pci_vga_pci;
pcitag_t pci_vga_tag;

int	pci_dopm;

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, void *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif
int	pci_reserve_resources(struct pci_attach_args *);
int	pci_primary_vga(struct pci_attach_args *);

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 * (1) there can be at most one ISA/EISA bridge per PCI bus, and
 * (2) any ISA/EISA bridges must be attached to primary PCI
 *     busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers). If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

int
pcimatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_flags = pba->pba_flags;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_busex = pba->pba_busex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;

	/* Reserve our own bus number. */
	if (sc->sc_busex)
		extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);

	pci_enumerate_bus(sc, pci_reserve_resources, NULL);

	/* Find the VGA device that's currently active. */
	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
		pci_vga_pci = sc;

	pci_enumerate_bus(sc, NULL, NULL);
}

int
pcidetach(struct device *self, int flags)
{
	return pci_detach_devices((struct pci_softc *)self, flags);
}

int
pciactivate(struct device *self, int act)
{
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		rv = config_activate_children(self, act);
		pci_suspend((struct pci_softc *)self);
		break;
	case DVACT_RESUME:
		pci_resume((struct pci_softc *)self);
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		pci_powerdown((struct pci_softc *)self);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

void
pci_suspend(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Save registers that may get lost. */
		for (i = 0; i < NMAPREG; i++)
			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4));
		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_BHLC_REG);
		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_INTERRUPT_REG);

		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA);
			if (reg & PCI_MSI_MC_C64) {
				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MAU32);
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD64);
			} else {
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD32);
			}
			pd->pd_msi_mc = reg;
		}

		pci_suspend_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
		    &pd->pd_msix_mc, pd->pd_msix_table);
	}
}

void
pci_powerdown(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		if (pci_dopm) {
			/*
			 * Place the device into the lowest possible
			 * power state.
			 */
			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
			    pd->pd_tag);
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pci_min_powerstate(sc->sc_pc, pd->pd_tag));
		}
	}
}

void
pci_resume(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Restore power. */
		if (pci_dopm)
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pd->pd_pmcsr_state);

		/* Restore the registers saved above. */
		for (i = 0; i < NMAPREG; i++)
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
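		/*
		 * Merge the saved command register (low 16 bits) with
		 * the status register as it reads now (high 16 bits),
		 * so only the command bits are actually restored.
		 */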
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
		    pd->pd_bhlc);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
		    pd->pd_int);

		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA, pd->pd_msi_ma);
			if (reg & PCI_MSI_MC_C64) {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD64, pd->pd_msi_md);
			} else {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD32, pd->pd_msi_md);
			}
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MC, pd->pd_msi_mc);
		}

		pci_resume_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
		    pd->pd_msix_mc, pd->pd_msix_table);
	}
}

int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (!pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
		    sizeof devinfo);
		printf(" %s", devinfo);
	}

	return (UNCONF);
}

int
pcisubmatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, match, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	pcireg_t id, class, intr, bhlcr, cap;
	int pin, bus, device, function;
	int off, ret = 0;
	uint64_t addr;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_busex = sc->sc_busex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = sc->sc_flags;
	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
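		/*
		 * For example, INTB# (pin 2) on a device in slot 3
		 * directly behind a bridge (pa_intrswiz == 3) becomes
		 * ((2 + 3 - 1) % 4) + 1 == 1, i.e. INTA# at the parent.
		 */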
		pa.pa_intrpin = /* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI32) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR)
				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
		}
	}

	/*
	 * Give the MD code a chance to alter pci_attach_args and/or
	 * skip devices.
	 */
	if (pci_probe_device_hook(pc, &pa) != 0)
		return (0);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		pcireg_t address, csr;
		int i, reg, reg_start, reg_end;
		int s;

		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
		pd->pd_tag = tag;
		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			break;
		case 1:	/* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			break;
		case 2:	/* PCI-CardBus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			break;
		default:
			return (0);
		}

		pd->pd_msix_table = pci_alloc_msix_table(sc->sc_pc, pd->pd_tag);

		s = splhigh();
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

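		/*
		 * Classic BAR sizing probe: write all ones to each BAR,
		 * read back the mask of implemented address bits, then
		 * restore the original value. E.g. a readback of
		 * 0xfffff000 (ignoring the low type bits) denotes a
		 * 4KB region.
		 */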
		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
			address = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, 0xffffffff);
			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, address);
		}

		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
		splx(s);

		if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
		    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
			pd->pd_vga_decode = 1;

		pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch);
		if (pd->pd_dev)
			pci_dev_postattach(pd->pd_dev, &pa);
	}

	return (ret);
}

int
pci_detach_devices(struct pci_softc *sc, int flags)
{
	struct pci_dev *pd, *next;
	int ret;

	ret = config_detach_children(&sc->sc_dev, flags);
	if (ret != 0)
		return (ret);

	for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
		pci_free_msix_table(sc->sc_pc, pd->pd_tag, pd->pd_msix_table);
		next = LIST_NEXT(pd, pd_next);
		free(pd, M_DEVBUF, sizeof *pd);
	}
	LIST_INIT(&sc->sc_devs);

	return (0);
}

int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1:	/* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
		/*
		 * Some devices, like parts of the NVIDIA C51 chipset,
		 * have a broken Capabilities List. So we need to do
		 * a sanity check here.
		 */
		if ((ofs & 3) || (ofs < 0x40))
			return (0);
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

int
pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
		return (0);

	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_ht_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_HT_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

int
pci_get_ext_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	/* Make sure this is a PCI Express device. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, NULL, NULL) == 0)
		return (0);

	/* Scan PCI Express extended capabilities. */
	ofs = PCI_PCIE_ECAP;
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < PCI_PCIE_ECAP))
			panic("pci_get_ext_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_PCIE_ECAP_ID(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_PCIE_ECAP_NEXT(reg);
	}

	return (0);
}

uint16_t
pci_requester_id(pci_chipset_tag_t pc, pcitag_t tag)
{
	int bus, dev, func;

	pci_decompose_tag(pc, tag, &bus, &dev, &func);
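	/*
	 * E.g. bus 2, device 3, function 1 yields
	 * (2 << 8) | (3 << 3) | 1 == 0x0219.
	 */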
	return ((bus << 8) | (dev << 3) | func);
}

int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t reg;
	int offset;

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
		return (reg & PCI_PMCSR_STATE_MASK);
	}
	return (PCI_PMCSR_STATE_D0);
}

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
{
	pcireg_t id, reg;
	int offset, ostate = state;
	int d3_delay = 10 * 1000;
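	/*
	 * delay() takes microseconds: 10 * 1000 us is the 10 ms
	 * recovery time the PCI PM spec calls for around D3hot
	 * transitions.
	 */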

	/* Some AMD Ryzen xHCI controllers need a bit more time to wake up. */
	id = pci_conf_read(pc, tag, PCI_ID_REG);
	if (PCI_VENDOR(id) == PCI_VENDOR_AMD) {
		switch (PCI_PRODUCT(id)) {
		case PCI_PRODUCT_AMD_17_1X_XHCI_1:
		case PCI_PRODUCT_AMD_17_1X_XHCI_2:
		case PCI_PRODUCT_AMD_17_6X_XHCI:
			d3_delay = 20 * 1000;
		default:
			break;
		}
	}

	/*
	 * Warn the firmware that we are going to put the device
	 * into the given state.
	 */
	pci_set_powerstate_md(pc, tag, state, 1);

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
		if (state == PCI_PMCSR_STATE_D3) {
			/*
			 * The PCI Power Management spec says we
			 * should disable I/O and memory space as well
			 * as bus mastering before we place the device
			 * into D3.
			 */
			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
			reg &= ~PCI_COMMAND_IO_ENABLE;
			reg &= ~PCI_COMMAND_MEM_ENABLE;
			reg &= ~PCI_COMMAND_MASTER_ENABLE;
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
		}
		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
			ostate = reg & PCI_PMCSR_STATE_MASK;

			pci_conf_write(pc, tag, offset + PCI_PMCSR,
			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
			if (state == PCI_PMCSR_STATE_D3 ||
			    ostate == PCI_PMCSR_STATE_D3)
				delay(d3_delay);
		}
	}

	/*
	 * Warn the firmware that the device is now in the given
	 * state.
	 */
	pci_set_powerstate_md(pc, tag, state, 0);

	return (ostate);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine. Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	int maxndevs = sc->sc_maxndevs;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr, cap;
	pcitag_t tag;

	/*
	 * PCIe downstream ports and root ports should only forward
	 * configuration requests for device number 0. However, not
	 * all hardware implements this correctly, and some devices
	 * will respond to other device numbers making the device show
	 * up 32 times. Prevent this by only scanning a single
	 * device.
	 */
	if (sc->sc_bridgetag && pci_get_capability(pc, *sc->sc_bridgetag,
	    PCI_CAP_PCIEXPRESS, NULL, &cap)) {
		switch (PCI_PCIE_XCAP_TYPE(cap)) {
		case PCI_PCIE_XCAP_TYPE_RP:
		case PCI_PCIE_XCAP_TYPE_DOWN:
		case PCI_PCIE_XCAP_TYPE_PCI2PCIE:
			maxndevs = 1;
			break;
		}
	}

	for (device = 0; device < maxndevs; device++) {
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, bir, csr;
	pcireg_t addr, mask, type;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end, reg_rom;
	int bus, dev, func;
	int sec, sub;
	int flags;
	int s;

	pci_decompose_tag(pc, tag, &bus, &dev, &func);

	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		reg_rom = PCI_ROM_REG;
		break;
	case 1:	/* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		reg_rom = 0;	/* 0x38 */
		break;
	case 2:	/* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		reg_rom = 0;
		break;
	default:
		return (0);
	}

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
			continue;

		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#ifdef __sparc64__
			/*
			 * Certain SPARC T5 systems assign
			 * non-prefetchable 64-bit BARs of their onboard
			 * mpii(4) controllers addresses in the
			 * prefetchable memory range. This is
			 * (probably) safe, as reads from the device
			 * registers mapped by these BARs are
			 * side-effect free. So assume the firmware
			 * knows what it is doing.
			 */
			if (base >= 0x100000000 &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#endif
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				if (csr & PCI_COMMAND_MEM_ENABLE) {
					printf("%d:%d:%d: mem address conflict"
					    " 0x%lx/0x%lx\n", bus, dev, func,
					    base, size);
				}
				pci_conf_write(pc, tag, reg, 0);
				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
					pci_conf_write(pc, tag, reg + 4, 0);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT)) {
				if (csr & PCI_COMMAND_IO_ENABLE) {
					printf("%d:%d:%d: io address conflict"
					    " 0x%lx/0x%lx\n", bus, dev, func,
					    base, size);
				}
				pci_conf_write(pc, tag, reg, 0);
			}
			break;
		}

		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	if (reg_rom != 0) {
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		base = PCI_ROM_ADDR(addr);
		size = PCI_ROM_SIZE(mask);
		if (base != 0 && size != 0) {
			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) &&
			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				if (addr & PCI_ROM_ENABLE) {
					printf("%d:%d:%d: rom address conflict"
					    " 0x%lx/0x%lx\n", bus, dev, func,
					    base, size);
				}
				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
			}
		}
	}

	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/* Figure out the I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	limit = (blr & 0x0000f000) | 0x00000fff;
	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
	base |= (blr & 0x0000ffff) << 16;
	limit |= (blr & 0xffff0000);
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
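	/*
	 * Example: a PPB_REG_IOSTATUS of 0x5551 decodes to base 0x5000
	 * and limit 0x5fff, a 4KB window (before the upper 16 bits
	 * from PPB_REG_IO_HI are merged in).
	 */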
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			blr &= 0xffff0000;
			blr |= 0x000000f0;
			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
		}
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
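	/*
	 * Example: a PPB_REG_MEM of 0xfff0fff0 decodes to a window from
	 * 0xfff00000 to 0xffffffff; both base and limit have 1MB
	 * granularity.
	 */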
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
		}
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
#ifdef __LP64__
	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32);
	base |= ((uint64_t)blr) << 32;
	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32);
	limit |= ((uint64_t)blr) << 32;
#endif
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_pmemex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	} else if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	}

	/* Figure out the bus range handled by the bridge. */
	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
	sec = PPB_BUSINFO_SECONDARY(bir);
	sub = PPB_BUSINFO_SUBORDINATE(bir);
	if (pa->pa_busex && sub >= sec && sub > 0) {
		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
		    EX_NOWAIT)) {
			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
			    bus, dev, func, sec, sub);
		}
	}

	return (0);
}

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	if ((offset + count) >= PCI_VPD_ADDRESS_MASK)
		return (EINVAL);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (ENXIO);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (EIO);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

int
pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
    int nent)
{
	const struct pci_matchid *pm;
	int i;

	for (i = 0, pm = ids; i < nent; i++, pm++)
		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
			return (1);
	return (0);
}

void
pci_disable_legacy_vga(struct device *dev)
{
	struct pci_softc *pci;
	struct pci_dev *pd;

	/* XXX Until we attach the drm drivers directly to pci. */
	while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
		dev = dev->dv_parent;

	pci = (struct pci_softc *)dev->dv_parent;
	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
		if (pd->pd_dev == dev) {
			pd->pd_vga_decode = 0;
			break;
		}
	}
}

#ifdef USER_PCICONF
/*
 * This is the user interface to PCI configuration space.
 */

#include <sys/pciio.h>
#include <sys/fcntl.h>

#ifdef DEBUG
#define PCIDEBUG(x) printf x
#else
#define PCIDEBUG(x)
#endif

void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
void pci_route_vga(struct pci_softc *);
void pci_unroute_vga(struct pci_softc *);

int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);

int
pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));

	if (minor(dev) >= pci_ndomains) {
		return ENXIO;
	}

#ifndef APERTURE
	if ((oflags & FWRITE) && securelevel > 0) {
		return EPERM;
	}
#else
	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
		return EPERM;
	}
#endif
	return (0);
}

int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));

	pci_vga_proc = NULL;
	return (0);
}

int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_dev *pd;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci;
	pci_chipset_tag_t pc;

	switch (cmd) {
	case PCIOCREAD:
	case PCIOCREADMASK:
		break;
	case PCIOCWRITE:
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	case PCIOCGETVPD:
		break;
	case PCIOCGETVGA:
	case PCIOCSETVGA:
		if (pci_vga_pci == NULL)
			return EINVAL;
		break;
	default:
		return ENOTTY;
	}

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
		int bus, dev, func;

		pci_decompose_tag(pc, pd->pd_tag, &bus, &dev, &func);

		if (bus == sel->pc_bus && dev == sel->pc_dev &&
		    func == sel->pc_func)
			break;
	}
	if (pd == NULL)
		return ENXIO;

	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;

	case PCIOCREADMASK:
		io = (struct pci_io *)data;

		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
		    io->pi_reg < PCI_MAPREG_START ||
		    io->pi_reg >= PCI_MAPREG_END)
			return (EINVAL);

		i = (io->pi_reg - PCI_MAPREG_START) / 4;
		io->pi_data = pd->pd_mask[i];
		error = 0;
		break;

	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		rom = (struct pci_rom *)data;

		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register'
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB. So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 */
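		/*
		 * Example: a readback mask of 0xfffe0000 corresponds
		 * to a 128KB ROM (~0xfffe0000 + 1 == 0x20000).
		 */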
		if (PCI_ROM_ADDR(addr) == 0 ||
		    PCI_ROM_SIZE(mask) % sizeof(buf) != 0)
			return (ENODEV);

		/* If we're just after the size, skip reading the ROM. */
		if (cmd == PCIOCGETROMLEN) {
			error = 0;
			goto fail;
		}

		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	case PCIOCGETVPD: {
		struct pci_vpd_req *pv = (struct pci_vpd_req *)data;
		pcireg_t *data;
		size_t len;
		unsigned int i;
		int s;

		CTASSERT(sizeof(*data) == sizeof(*pv->pv_data));

		data = mallocarray(pv->pv_count, sizeof(*data), M_TEMP,
		    M_WAITOK|M_CANFAIL);
		if (data == NULL) {
			error = ENOMEM;
			break;
		}

		s = splhigh();
		error = pci_vpd_read(pc, tag, pv->pv_offset, pv->pv_count,
		    data);
		splx(s);

		len = pv->pv_count * sizeof(*pv->pv_data);

		if (error == 0) {
			for (i = 0; i < pv->pv_count; i++)
				data[i] = letoh32(data[i]);

			error = copyout(data, pv->pv_data, len);
		}

		free(data, M_TEMP, len);
		break;
	}

	case PCIOCGETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		struct pci_dev *pd;
		int bus, dev, func;

		vga->pv_decode = 0;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				if (pd->pd_vga_decode)
					vga->pv_decode = PCI_VGA_IO_ENABLE |
					    PCI_VGA_MEM_ENABLE;
				break;
			}
		}

		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		vga->pv_sel.pc_bus = bus;
		vga->pv_sel.pc_dev = dev;
		vga->pv_sel.pc_func = func;
		error = 0;
		break;
	}
	case PCIOCSETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, dev, func;

		switch (vga->pv_lock) {
		case PCI_VGA_UNLOCK:
		case PCI_VGA_LOCK:
		case PCI_VGA_TRYLOCK:
			break;
		default:
			return (EINVAL);
		}

		if (vga->pv_lock == PCI_VGA_UNLOCK) {
			if (pci_vga_proc != p)
				return (EINVAL);
			pci_vga_proc = NULL;
			wakeup(&pci_vga_proc);
			return (0);
		}

		while (pci_vga_proc != p && pci_vga_proc != NULL) {
			if (vga->pv_lock == PCI_VGA_TRYLOCK)
				return (EBUSY);
			error = tsleep_nsec(&pci_vga_proc, PLOCK | PCATCH,
			    "vgalk", INFSLP);
			if (error)
				return (error);
		}
		pci_vga_proc = p;

		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
		    func != vga->pv_sel.pc_func) {
			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
			if (pci != pci_vga_pci) {
				pci_unroute_vga(pci_vga_pci);
				pci_route_vga(pci);
				pci_vga_pci = pci;
			}
			pci_enable_vga(pc, tag);
			pci_vga_tag = tag;
		}

		error = 0;
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

void
pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t csr;

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
}

void
pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t csr;

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
}

void
pci_route_vga(struct pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcireg_t bc;

	if (sc->sc_bridgetag == NULL)
		return;

	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);

	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
}

void
pci_unroute_vga(struct pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcireg_t bc;

	if (sc->sc_bridgetag == NULL)
		return;

	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);

	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
}
#endif /* USER_PCICONF */

int
pci_primary_vga(struct pci_attach_args *pa)
{
	/* XXX For now, only handle the first PCI domain. */
	if (pa->pa_domain != 0)
		return (0);

	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
		return (0);

	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
		return (0);

	pci_vga_tag = pa->pa_tag;

	return (1);
}

#ifdef __HAVE_PCI_MSIX

struct msix_vector *
pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
{
	struct msix_vector *table;
	pcireg_t reg;
	int tblsz;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
		return NULL;

	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	table = mallocarray(tblsz, sizeof(*table), M_DEVBUF, M_WAITOK);

	return table;
}

void
pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
    struct msix_vector *table)
{
	pcireg_t reg;
	int tblsz;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
		return;

	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	free(table, M_DEVBUF, tblsz * sizeof(*table));
}

void
pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
{
	bus_space_handle_t memh;
	pcireg_t reg;
	int tblsz, i;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
		return;

	KASSERT(table != NULL);

	if (pci_msix_table_map(pc, tag, memt, &memh))
		return;

	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	for (i = 0; i < tblsz; i++) {
		table[i].mv_ma = bus_space_read_4(memt, memh, PCI_MSIX_MA(i));
		table[i].mv_mau32 = bus_space_read_4(memt, memh,
		    PCI_MSIX_MAU32(i));
		table[i].mv_md = bus_space_read_4(memt, memh, PCI_MSIX_MD(i));
		table[i].mv_vc = bus_space_read_4(memt, memh, PCI_MSIX_VC(i));
	}

	pci_msix_table_unmap(pc, tag, memt, memh);

	*mc = reg;
}

void
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
	bus_space_handle_t memh;
	pcireg_t reg;
	int tblsz, i;
	int off;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, &reg) == 0)
		return;

	KASSERT(table != NULL);

	if (pci_msix_table_map(pc, tag, memt, &memh))
		return;

	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
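	/*
	 * Restore each vector's address and data words before its
	 * vector control word (which holds the mask bit), so a vector
	 * is never unmasked while it still has a stale address or data.
	 */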
	for (i = 0; i < tblsz; i++) {
		bus_space_write_4(memt, memh, PCI_MSIX_MA(i), table[i].mv_ma);
		bus_space_write_4(memt, memh, PCI_MSIX_MAU32(i),
		    table[i].mv_mau32);
		bus_space_write_4(memt, memh, PCI_MSIX_MD(i), table[i].mv_md);
		bus_space_barrier(memt, memh, PCI_MSIX_MA(i), 16,
		    BUS_SPACE_BARRIER_WRITE);
		bus_space_write_4(memt, memh, PCI_MSIX_VC(i), table[i].mv_vc);
		bus_space_barrier(memt, memh, PCI_MSIX_VC(i), 4,
		    BUS_SPACE_BARRIER_WRITE);
	}

	pci_msix_table_unmap(pc, tag, memt, memh);

	pci_conf_write(pc, tag, off, mc);
}

int
pci_intr_msix_count(struct pci_attach_args *pa)
{
	pcireg_t reg;

	if ((pa->pa_flags & PCI_FLAGS_MSI_ENABLED) == 0)
		return (0);

	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL,
	    &reg) == 0)
		return (0);

	return (PCI_MSIX_MC_TBLSZ(reg) + 1);
}

#else /* __HAVE_PCI_MSIX */

struct msix_vector *
pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
{
	return NULL;
}

void
pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
    struct msix_vector *table)
{
}

void
pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
{
}

void
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
}

int
pci_intr_msix_count(struct pci_attach_args *pa)
{
	return (0);
}

#endif /* __HAVE_PCI_MSIX */