1 /*	$OpenBSD: pci.c,v 1.111 2015/11/27 15:28:22 kettenis Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/ppbreg.h>
47 
48 int pcimatch(struct device *, void *, void *);
49 void pciattach(struct device *, struct device *, void *);
50 int pcidetach(struct device *, int);
51 int pciactivate(struct device *, int);
52 void pci_suspend(struct pci_softc *);
53 void pci_powerdown(struct pci_softc *);
54 void pci_resume(struct pci_softc *);
55 
56 #define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
57 				    sizeof(pcireg_t))
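/*
 * Per-device state kept by the bus driver itself, in addition to the
 * softc of the attached child: the configuration registers saved across
 * suspend/resume, the BAR size masks probed at attach time, and whether
 * the device decodes legacy VGA cycles.  NMAPREG is the number of
 * 32-bit base address registers in a type 0 configuration header.
 */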
58 struct pci_dev {
59 	struct device *pd_dev;
60 	LIST_ENTRY(pci_dev) pd_next;
61 	pcitag_t pd_tag;        /* pci register tag */
62 	pcireg_t pd_csr;
63 	pcireg_t pd_bhlc;
64 	pcireg_t pd_int;
65 	pcireg_t pd_map[NMAPREG];
66 	pcireg_t pd_mask[NMAPREG];
67 	pcireg_t pd_msi_mc;
68 	pcireg_t pd_msi_ma;
69 	pcireg_t pd_msi_mau32;
70 	pcireg_t pd_msi_md;
71 	int pd_pmcsr_state;
72 	int pd_vga_decode;
73 };
74 
75 #ifdef APERTURE
76 extern int allowaperture;
77 #endif
78 
79 struct cfattach pci_ca = {
80 	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
81 };
82 
83 struct cfdriver pci_cd = {
84 	NULL, "pci", DV_DULL
85 };
86 
87 int	pci_ndomains;
88 
89 struct proc *pci_vga_proc;
90 struct pci_softc *pci_vga_pci;
91 pcitag_t pci_vga_tag;
92 
93 int	pci_dopm;
94 
95 int	pciprint(void *, const char *);
96 int	pcisubmatch(struct device *, void *, void *);
97 
98 #ifdef PCI_MACHDEP_ENUMERATE_BUS
99 #define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
100 #else
101 int pci_enumerate_bus(struct pci_softc *,
102     int (*)(struct pci_attach_args *), struct pci_attach_args *);
103 #endif
104 int	pci_reserve_resources(struct pci_attach_args *);
105 int	pci_primary_vga(struct pci_attach_args *);
106 
107 /*
108  * Important note about PCI-ISA bridges:
109  *
110  * Callbacks are used to configure these devices so that ISA/EISA bridges
111  * can attach their child busses after PCI configuration is done.
112  *
113  * This works because:
114  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
115  *	(2) any ISA/EISA bridges must be attached to primary PCI
116  *	    busses (i.e. bus zero).
117  *
118  * That boils down to: there can only be one of these outstanding
119  * at a time, it is cleared when configuring PCI bus 0 before any
120  * subdevices have been found, and it is run after all subdevices
121  * of PCI bus 0 have been found.
122  *
123  * This is needed because there are some (legacy) PCI devices which
124  * can show up as ISA/EISA devices as well (the prime example of which
125  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
126  * and the bridge is seen before the video board is, the board can show
127  * up as an ISA device, and that can (bogusly) complicate the PCI device's
128  * attach code, or make the PCI device not be properly attached at all.
129  *
130  * We use the generic config_defer() facility to achieve this.
131  */
132 
133 int
134 pcimatch(struct device *parent, void *match, void *aux)
135 {
136 	struct cfdata *cf = match;
137 	struct pcibus_attach_args *pba = aux;
138 
139 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
140 		return (0);
141 
142 	/* Check the locators */
143 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
144 	    cf->pcibuscf_bus != pba->pba_bus)
145 		return (0);
146 
147 	/* sanity */
148 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
149 		return (0);
150 
151 	/*
152 	 * XXX check other (hardware?) indicators
153 	 */
154 
155 	return (1);
156 }
157 
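/*
 * Attach a PCI bus: record the tags, bus resources and extents handed
 * down by the parent, reserve our own bus number, reserve the resources
 * the firmware has already assigned (pci_reserve_resources), locate the
 * currently active VGA device (pci_primary_vga) and finally enumerate
 * and attach the devices on the bus.
 */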
158 void
159 pciattach(struct device *parent, struct device *self, void *aux)
160 {
161 	struct pcibus_attach_args *pba = aux;
162 	struct pci_softc *sc = (struct pci_softc *)self;
163 
164 	pci_attach_hook(parent, self, pba);
165 
166 	printf("\n");
167 
168 	LIST_INIT(&sc->sc_devs);
169 
170 	sc->sc_iot = pba->pba_iot;
171 	sc->sc_memt = pba->pba_memt;
172 	sc->sc_dmat = pba->pba_dmat;
173 	sc->sc_pc = pba->pba_pc;
174 	sc->sc_flags = pba->pba_flags;
175 	sc->sc_ioex = pba->pba_ioex;
176 	sc->sc_memex = pba->pba_memex;
177 	sc->sc_pmemex = pba->pba_pmemex;
178 	sc->sc_busex = pba->pba_busex;
179 	sc->sc_domain = pba->pba_domain;
180 	sc->sc_bus = pba->pba_bus;
181 	sc->sc_bridgetag = pba->pba_bridgetag;
182 	sc->sc_bridgeih = pba->pba_bridgeih;
183 	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
184 	sc->sc_intrswiz = pba->pba_intrswiz;
185 	sc->sc_intrtag = pba->pba_intrtag;
186 
187 	/* Reserve our own bus number. */
188 	if (sc->sc_busex)
189 		extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);
190 
191 	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
192 
193 	/* Find the VGA device that's currently active. */
194 	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
195 		pci_vga_pci = sc;
196 
197 	pci_enumerate_bus(sc, NULL, NULL);
198 }
199 
200 int
201 pcidetach(struct device *self, int flags)
202 {
203 	return pci_detach_devices((struct pci_softc *)self, flags);
204 }
205 
206 int
207 pciactivate(struct device *self, int act)
208 {
209 	int rv = 0;
210 
211 	switch (act) {
212 	case DVACT_SUSPEND:
213 		rv = config_activate_children(self, act);
214 		pci_suspend((struct pci_softc *)self);
215 		break;
216 	case DVACT_RESUME:
217 		pci_resume((struct pci_softc *)self);
218 		rv = config_activate_children(self, act);
219 		break;
220 	case DVACT_POWERDOWN:
221 		rv = config_activate_children(self, act);
222 		pci_powerdown((struct pci_softc *)self);
223 		break;
224 	default:
225 		rv = config_activate_children(self, act);
226 		break;
227 	}
228 	return (rv);
229 }
230 
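/*
 * Save the configuration registers of every type 0 child (BARs,
 * command/status, BHLC, interrupt line and MSI state) so that
 * pci_resume() can restore them.  Bridges (header types 1 and 2)
 * save their own state in their respective drivers.
 */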
231 void
232 pci_suspend(struct pci_softc *sc)
233 {
234 	struct pci_dev *pd;
235 	pcireg_t bhlc, reg;
236 	int off, i;
237 
238 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
239 		/*
240 		 * Only handle header type 0 here; PCI-PCI bridges and
241 		 * CardBus bridges need special handling, which will
242 		 * be done in their specific drivers.
243 		 */
244 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
245 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
246 			continue;
247 
248 		/* Save registers that may get lost. */
249 		for (i = 0; i < NMAPREG; i++)
250 			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
251 			    PCI_MAPREG_START + (i * 4));
252 		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
253 		    PCI_COMMAND_STATUS_REG);
254 		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
255 		    PCI_BHLC_REG);
256 		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
257 		    PCI_INTERRUPT_REG);
258 
259 		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
260 		    PCI_CAP_MSI, &off, &reg)) {
261 			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
262 			    off + PCI_MSI_MA);
263 			if (reg & PCI_MSI_MC_C64) {
264 				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
265 				    pd->pd_tag, off + PCI_MSI_MAU32);
266 				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
267 				    pd->pd_tag, off + PCI_MSI_MD64);
268 			} else {
269 				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
270 				    pd->pd_tag, off + PCI_MSI_MD32);
271 			}
272 			pd->pd_msi_mc = reg;
273 		}
274 	}
275 }
276 
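/*
 * If power management is enabled (pci_dopm), drop every type 0 child
 * into the lowest power state it supports, remembering the previous
 * state so pci_resume() can restore it.
 */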
277 void
278 pci_powerdown(struct pci_softc *sc)
279 {
280 	struct pci_dev *pd;
281 	pcireg_t bhlc;
282 
283 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
284 		/*
285 		 * Only handle header type 0 here; PCI-PCI bridges and
286 		 * CardBus bridges need special handling, which will
287 		 * be done in their specific drivers.
288 		 */
289 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
290 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
291 			continue;
292 
293 		if (pci_dopm) {
294 			/*
295 			 * Place the device into the lowest possible
296 			 * power state.
297 			 */
298 			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
299 			    pd->pd_tag);
300 			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
301 			    pci_min_powerstate(sc->sc_pc, pd->pd_tag));
302 		}
303 	}
304 }
305 
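/*
 * Undo pci_suspend()/pci_powerdown(): restore the power state and the
 * saved configuration registers of every type 0 child.
 */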
306 void
307 pci_resume(struct pci_softc *sc)
308 {
309 	struct pci_dev *pd;
310 	pcireg_t bhlc, reg;
311 	int off, i;
312 
313 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
314 		/*
315 		 * Only handle header type 0 here; PCI-PCI bridges and
316 		 * CardBus bridges need special handling, which will
317 		 * be done in their specific drivers.
318 		 */
319 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
320 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
321 			continue;
322 
323 		/* Restore power. */
324 		if (pci_dopm)
325 			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
326 			    pd->pd_pmcsr_state);
327 
328 		/* Restore the registers saved above. */
329 		for (i = 0; i < NMAPREG; i++)
330 			pci_conf_write(sc->sc_pc, pd->pd_tag,
331 			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
332 		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
333 		    PCI_COMMAND_STATUS_REG);
334 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
335 		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
336 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
337 		    pd->pd_bhlc);
338 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
339 		    pd->pd_int);
340 
341 		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
342 		    PCI_CAP_MSI, &off, &reg)) {
343 			pci_conf_write(sc->sc_pc, pd->pd_tag,
344 			    off + PCI_MSI_MA, pd->pd_msi_ma);
345 			if (reg & PCI_MSI_MC_C64) {
346 				pci_conf_write(sc->sc_pc, pd->pd_tag,
347 				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
348 				pci_conf_write(sc->sc_pc, pd->pd_tag,
349 				    off + PCI_MSI_MD64, pd->pd_msi_md);
350 			} else {
351 				pci_conf_write(sc->sc_pc, pd->pd_tag,
352 				    off + PCI_MSI_MD32, pd->pd_msi_md);
353 			}
354 			pci_conf_write(sc->sc_pc, pd->pd_tag,
355 			    off + PCI_MSI_MC, pd->pd_msi_mc);
356 		}
357 	}
358 }
359 
360 int
361 pciprint(void *aux, const char *pnp)
362 {
363 	struct pci_attach_args *pa = aux;
364 	char devinfo[256];
365 
366 	if (pnp) {
367 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
368 		    sizeof devinfo);
369 		printf("%s at %s", devinfo, pnp);
370 	}
371 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
372 	if (!pnp) {
373 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
374 		    sizeof devinfo);
375 		printf(" %s", devinfo);
376 	}
377 
378 	return (UNCONF);
379 }
380 
381 int
382 pcisubmatch(struct device *parent, void *match, void *aux)
383 {
384 	struct cfdata *cf = match;
385 	struct pci_attach_args *pa = aux;
386 
387 	if (cf->pcicf_dev != PCI_UNK_DEV &&
388 	    cf->pcicf_dev != pa->pa_device)
389 		return (0);
390 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
391 	    cf->pcicf_function != pa->pa_function)
392 		return (0);
393 
394 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
395 }
396 
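/*
 * Probe the function addressed by `tag'.  If `match' is non-NULL it is
 * called with the assembled pci_attach_args and its result is returned
 * (the args are copied into `pap' on a match); otherwise the BAR size
 * masks are probed, with I/O and memory decoding temporarily disabled,
 * and the device is attached via config_found_sm().
 */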
397 int
398 pci_probe_device(struct pci_softc *sc, pcitag_t tag,
399     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
400 {
401 	pci_chipset_tag_t pc = sc->sc_pc;
402 	struct pci_attach_args pa;
403 	struct pci_dev *pd;
404 	pcireg_t id, class, intr, bhlcr, cap;
405 	int pin, bus, device, function;
406 	int off, ret = 0;
407 	uint64_t addr;
408 
409 	pci_decompose_tag(pc, tag, &bus, &device, &function);
410 
411 	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
412 	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
413 		return (0);
414 
415 	id = pci_conf_read(pc, tag, PCI_ID_REG);
416 	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
417 
418 	/* Invalid vendor ID value? */
419 	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
420 		return (0);
421 	/* XXX Not invalid, but we've done this ~forever. */
422 	if (PCI_VENDOR(id) == 0)
423 		return (0);
424 
425 	pa.pa_iot = sc->sc_iot;
426 	pa.pa_memt = sc->sc_memt;
427 	pa.pa_dmat = sc->sc_dmat;
428 	pa.pa_pc = pc;
429 	pa.pa_ioex = sc->sc_ioex;
430 	pa.pa_memex = sc->sc_memex;
431 	pa.pa_pmemex = sc->sc_pmemex;
432 	pa.pa_busex = sc->sc_busex;
433 	pa.pa_domain = sc->sc_domain;
434 	pa.pa_bus = bus;
435 	pa.pa_device = device;
436 	pa.pa_function = function;
437 	pa.pa_tag = tag;
438 	pa.pa_id = id;
439 	pa.pa_class = class;
440 	pa.pa_bridgetag = sc->sc_bridgetag;
441 	pa.pa_bridgeih = sc->sc_bridgeih;
442 
443 	/* This is a simplification of the NetBSD code.
444 	   We don't support turning off I/O or memory
445 	   on broken hardware. <csapuntz@stanford.edu> */
446 	pa.pa_flags = sc->sc_flags;
447 	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
448 
449 	if (sc->sc_bridgetag == NULL) {
450 		pa.pa_intrswiz = 0;
451 		pa.pa_intrtag = tag;
452 	} else {
453 		pa.pa_intrswiz = sc->sc_intrswiz + device;
454 		pa.pa_intrtag = sc->sc_intrtag;
455 	}
456 
457 	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);
458 
459 	pin = PCI_INTERRUPT_PIN(intr);
460 	pa.pa_rawintrpin = pin;
461 	if (pin == PCI_INTERRUPT_PIN_NONE) {
462 		/* no interrupt */
463 		pa.pa_intrpin = 0;
464 	} else {
465 		/*
466 		 * swizzle it based on the number of busses we're
467 		 * behind and our device number.
468 		 */
469 		pa.pa_intrpin = 	/* XXX */
470 		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
471 	}
472 	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);
473 
474 	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
475 		/*
476 		 * XXX Should we enable MSI mapping ourselves on
477 		 * systems that have it disabled?
478 		 */
479 		if (cap & PCI_HT_MSI_ENABLED) {
480 			if ((cap & PCI_HT_MSI_FIXED) == 0) {
481 				addr = pci_conf_read(pc, tag,
482 				    off + PCI_HT_MSI_ADDR);
483 				addr |= (uint64_t)pci_conf_read(pc, tag,
484 				    off + PCI_HT_MSI_ADDR_HI32) << 32;
485 			} else
486 				addr = PCI_HT_MSI_FIXED_ADDR;
487 
488 			/*
489 			 * XXX This will fail to enable MSI on systems
490 			 * that don't use the canonical address.
491 			 */
492 			if (addr == PCI_HT_MSI_FIXED_ADDR)
493 				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
494 		}
495 	}
496 
497 	/*
498 	 * Give the MD code a chance to alter pci_attach_args and/or
499 	 * skip devices.
500 	 */
501 	if (pci_probe_device_hook(pc, &pa) != 0)
502 		return (0);
503 
504 	if (match != NULL) {
505 		ret = (*match)(&pa);
506 		if (ret != 0 && pap != NULL)
507 			*pap = pa;
508 	} else {
509 		pcireg_t address, csr;
510 		int i, reg, reg_start, reg_end;
511 		int s;
512 
513 		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
514 		pd->pd_tag = tag;
515 		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);
516 
517 		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
518 		case 0:
519 			reg_start = PCI_MAPREG_START;
520 			reg_end = PCI_MAPREG_END;
521 			break;
522 		case 1: /* PCI-PCI bridge */
523 			reg_start = PCI_MAPREG_START;
524 			reg_end = PCI_MAPREG_PPB_END;
525 			break;
526 		case 2: /* PCI-CardBus bridge */
527 			reg_start = PCI_MAPREG_START;
528 			reg_end = PCI_MAPREG_PCB_END;
529 			break;
530 		default:
531 			return (0);
532 		}
533 
534 		s = splhigh();
535 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
536 		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
537 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
538 			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));
539 
540 		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
541 			address = pci_conf_read(pc, tag, reg);
542 			pci_conf_write(pc, tag, reg, 0xffffffff);
543 			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
544 			pci_conf_write(pc, tag, reg, address);
545 		}
546 
547 		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
548 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
549 		splx(s);
550 
551 		if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
552 		    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
553 		    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
554 		    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
555 			pd->pd_vga_decode = 1;
556 
557 		pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
558 		    pcisubmatch);
559 		if (pd->pd_dev)
560 			pci_dev_postattach(pd->pd_dev, &pa);
561 	}
562 
563 	return (ret);
564 }
565 
566 int
567 pci_detach_devices(struct pci_softc *sc, int flags)
568 {
569 	struct pci_dev *pd, *next;
570 	int ret;
571 
572 	ret = config_detach_children(&sc->sc_dev, flags);
573 	if (ret != 0)
574 		return (ret);
575 
576 	for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
577 		next = LIST_NEXT(pd, pd_next);
578 		free(pd, M_DEVBUF, sizeof *pd);
579 	}
580 	LIST_INIT(&sc->sc_devs);
581 
582 	return (0);
583 }
584 
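/*
 * Walk the capability list of the device looking for capability
 * `capid'.  Returns non-zero if found, filling in the register offset
 * and the first dword of the capability.  Offsets that are not dword
 * aligned or point below 0x40 terminate the walk, since some devices
 * carry broken capability lists.
 *
 * Typical usage from a driver's attach routine (sketch):
 *
 *	int off;
 *	pcireg_t reg;
 *
 *	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
 *	    &off, &reg)) {
 *		... the device has a PCI Express capability at `off' ...
 *	}
 */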
585 int
586 pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
587     int *offset, pcireg_t *value)
588 {
589 	pcireg_t reg;
590 	unsigned int ofs;
591 
592 	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
593 	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
594 		return (0);
595 
596 	/* Determine the Capability List Pointer register to start with. */
597 	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
598 	switch (PCI_HDRTYPE_TYPE(reg)) {
599 	case 0:	/* standard device header */
600 	case 1: /* PCI-PCI bridge header */
601 		ofs = PCI_CAPLISTPTR_REG;
602 		break;
603 	case 2:	/* PCI-CardBus bridge header */
604 		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
605 		break;
606 	default:
607 		return (0);
608 	}
609 
610 	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
611 	while (ofs != 0) {
612 		/*
613 		 * Some devices, like parts of the NVIDIA C51 chipset,
614 		 * have a broken Capabilities List.  So we need to do
615 		 * a sanity check here.
616 		 */
617 		if ((ofs & 3) || (ofs < 0x40))
618 			return (0);
619 		reg = pci_conf_read(pc, tag, ofs);
620 		if (PCI_CAPLIST_CAP(reg) == capid) {
621 			if (offset)
622 				*offset = ofs;
623 			if (value)
624 				*value = reg;
625 			return (1);
626 		}
627 		ofs = PCI_CAPLIST_NEXT(reg);
628 	}
629 
630 	return (0);
631 }
632 
633 int
634 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
635     int *offset, pcireg_t *value)
636 {
637 	pcireg_t reg;
638 	unsigned int ofs;
639 
640 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
641 		return (0);
642 
643 	while (ofs != 0) {
644 #ifdef DIAGNOSTIC
645 		if ((ofs & 3) || (ofs < 0x40))
646 			panic("pci_get_ht_capability");
647 #endif
648 		reg = pci_conf_read(pc, tag, ofs);
649 		if (PCI_HT_CAP(reg) == capid) {
650 			if (offset)
651 				*offset = ofs;
652 			if (value)
653 				*value = reg;
654 			return (1);
655 		}
656 		ofs = PCI_CAPLIST_NEXT(reg);
657 	}
658 
659 	return (0);
660 }
661 
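/*
 * Search every attached pci(4) bus for a device accepted by `match';
 * on success the attach args of the first match are left in `pa' and
 * 1 is returned.
 */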
662 int
663 pci_find_device(struct pci_attach_args *pa,
664     int (*match)(struct pci_attach_args *))
665 {
666 	extern struct cfdriver pci_cd;
667 	struct device *pcidev;
668 	int i;
669 
670 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
671 		pcidev = pci_cd.cd_devs[i];
672 		if (pcidev != NULL &&
673 		    pci_enumerate_bus((struct pci_softc *)pcidev,
674 		    		      match, pa) != 0)
675 			return (1);
676 	}
677 	return (0);
678 }
679 
680 int
681 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
682 {
683 	pcireg_t reg;
684 	int offset;
685 
686 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
687 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
688 		return (reg & PCI_PMCSR_STATE_MASK);
689 	}
690 	return (PCI_PMCSR_STATE_D0);
691 }
692 
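/*
 * Move the device into the requested power state and return the state
 * it was in before.  I/O, memory and bus master decoding are disabled
 * before entering D3, and a 10ms settle delay is inserted when
 * transitioning into or out of D3.
 */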
693 int
694 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
695 {
696 	pcireg_t reg;
697 	int offset, ostate = state;
698 
699 	/*
700 	 * Warn the firmware that we are going to put the device
701 	 * into the given state.
702 	 */
703 	pci_set_powerstate_md(pc, tag, state, 1);
704 
705 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
706 		if (state == PCI_PMCSR_STATE_D3) {
707 			/*
708 			 * The PCI Power Management spec says we
709 			 * should disable I/O and memory space as well
710 			 * as bus mastering before we place the device
711 			 * into D3.
712 			 */
713 			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
714 			reg &= ~PCI_COMMAND_IO_ENABLE;
715 			reg &= ~PCI_COMMAND_MEM_ENABLE;
716 			reg &= ~PCI_COMMAND_MASTER_ENABLE;
717 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
718 		}
719 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
720 		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
721 			ostate = reg & PCI_PMCSR_STATE_MASK;
722 
723 			pci_conf_write(pc, tag, offset + PCI_PMCSR,
724 			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
725 			if (state == PCI_PMCSR_STATE_D3 ||
726 			    ostate == PCI_PMCSR_STATE_D3)
727 				delay(10 * 1000);
728 		}
729 	}
730 
731 	/*
732 	 * Warn the firmware that the device is now in the given
733 	 * state.
734 	 */
735 	pci_set_powerstate_md(pc, tag, state, 0);
736 
737 	return (ostate);
738 }
739 
740 #ifndef PCI_MACHDEP_ENUMERATE_BUS
741 /*
742  * Generic PCI bus enumeration routine.  Used unless machine-dependent
743  * code needs to provide something else.
744  */
745 int
746 pci_enumerate_bus(struct pci_softc *sc,
747     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
748 {
749 	pci_chipset_tag_t pc = sc->sc_pc;
750 	int device, function, nfunctions, ret;
751 	const struct pci_quirkdata *qd;
752 	pcireg_t id, bhlcr;
753 	pcitag_t tag;
754 
755 	for (device = 0; device < sc->sc_maxndevs; device++) {
756 		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
757 
758 		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
759 		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
760 			continue;
761 
762 		id = pci_conf_read(pc, tag, PCI_ID_REG);
763 
764 		/* Invalid vendor ID value? */
765 		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
766 			continue;
767 		/* XXX Not invalid, but we've done this ~forever. */
768 		if (PCI_VENDOR(id) == 0)
769 			continue;
770 
771 		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));
772 
773 		if (qd != NULL &&
774 		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
775 			nfunctions = 8;
776 		else if (qd != NULL &&
777 		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
778 			nfunctions = 1;
779 		else
780 			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
781 
782 		for (function = 0; function < nfunctions; function++) {
783 			tag = pci_make_tag(pc, sc->sc_bus, device, function);
784 			ret = pci_probe_device(sc, tag, match, pap);
785 			if (match != NULL && ret != 0)
786 				return (ret);
787 		}
788  	}
789 
790 	return (0);
791 }
792 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
793 
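/*
 * Enumeration callback run once at attach time for every function
 * found: reserve the address ranges already programmed into the BARs
 * and the expansion ROM and, for PCI-PCI bridges, the I/O, memory and
 * prefetchable memory windows as well as the secondary bus range, so
 * that later allocations do not hand them out again.  BARs that
 * conflict with an existing reservation are cleared and conflicting
 * bridge windows are closed.
 */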
794 int
795 pci_reserve_resources(struct pci_attach_args *pa)
796 {
797 	pci_chipset_tag_t pc = pa->pa_pc;
798 	pcitag_t tag = pa->pa_tag;
799 	pcireg_t bhlc, blr, type, bir;
800 	pcireg_t addr, mask;
801 	bus_addr_t base, limit;
802 	bus_size_t size;
803 	int reg, reg_start, reg_end, reg_rom;
804 	int bus, dev, func;
805 	int sec, sub;
806 	int flags;
807 	int s;
808 
809 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
810 
811 	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
812 	switch (PCI_HDRTYPE_TYPE(bhlc)) {
813 	case 0:
814 		reg_start = PCI_MAPREG_START;
815 		reg_end = PCI_MAPREG_END;
816 		reg_rom = PCI_ROM_REG;
817 		break;
818 	case 1: /* PCI-PCI bridge */
819 		reg_start = PCI_MAPREG_START;
820 		reg_end = PCI_MAPREG_PPB_END;
821 		reg_rom = 0;	/* 0x38 */
822 		break;
823 	case 2: /* PCI-CardBus bridge */
824 		reg_start = PCI_MAPREG_START;
825 		reg_end = PCI_MAPREG_PCB_END;
826 		reg_rom = 0;
827 		break;
828 	default:
829 		return (0);
830 	}
831 
832 	for (reg = reg_start; reg < reg_end; reg += 4) {
833 		if (!pci_mapreg_probe(pc, tag, reg, &type))
834 			continue;
835 
836 		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
837 			continue;
838 
839 		if (base == 0)
840 			continue;
841 
842 		switch (type) {
843 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
844 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
845 			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
846 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
847 			    base, size, EX_NOWAIT) == 0) {
848 				break;
849 			}
850 #ifdef __sparc64__
851 			/*
852 			 * Certain SPARC T5 systems assign
853 			 * non-prefetchable 64-bit BARs of its onboard
854 			 * mpii(4) controllers addresses in the
855 			 * prefetchable memory range.  This is
856 			 * (probably) safe, as reads from the device
857 			 * registers mapped by these BARs are
858 			 * side-effect free.  So assume the firmware
859 			 * knows what it is doing.
860 			 */
861 			if (base >= 0x100000000 &&
862 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
863 			    base, size, EX_NOWAIT) == 0) {
864 				break;
865 			}
866 #endif
867 			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
868 			    base, size, EX_NOWAIT)) {
869 				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
870 				    bus, dev, func, base, size);
871 				pci_conf_write(pc, tag, reg, 0);
872 				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
873 					pci_conf_write(pc, tag, reg + 4, 0);
874 			}
875 			break;
876 		case PCI_MAPREG_TYPE_IO:
877 			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
878 			    base, size, EX_NOWAIT)) {
879 				printf("%d:%d:%d: io address conflict 0x%lx/0x%lx\n",
880 				    bus, dev, func, base, size);
881 				pci_conf_write(pc, tag, reg, 0);
882 			}
883 			break;
884 		}
885 
886 		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
887 			reg += 4;
888 	}
889 
890 	if (reg_rom != 0) {
891 		s = splhigh();
892 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
893 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
894 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
895 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
896 		splx(s);
897 
898 		base = PCI_ROM_ADDR(addr);
899 		size = PCI_ROM_SIZE(mask);
900 		if (base != 0 && size != 0) {
901 			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
902 			    base, size, EX_NOWAIT) &&
903 			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
904 			    base, size, EX_NOWAIT)) {
905 				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
906 				    bus, dev, func, base, size);
907 				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
908 			}
909 		}
910 	}
911 
912 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
913 		return (0);
914 
915 	/* Figure out the I/O address range of the bridge. */
916 	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
917 	base = (blr & 0x000000f0) << 8;
918 	limit = (blr & 0x0000f000) | 0x00000fff;
919 	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
920 	base |= (blr & 0x0000ffff) << 16;
921 	limit |= (blr & 0xffff0000);
922 	if (limit > base)
923 		size = (limit - base + 1);
924 	else
925 		size = 0;
926 	if (pa->pa_ioex && base > 0 && size > 0) {
927 		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
928 			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
929 			    bus, dev, func, base, size);
930 			blr &= 0xffff0000;
931 			blr |= 0x000000f0;
932 			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
933 		}
934 	}
935 
936 	/* Figure out the memory mapped I/O address range of the bridge. */
937 	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
938 	base = (blr & 0x0000fff0) << 16;
939 	limit = (blr & 0xfff00000) | 0x000fffff;
940 	if (limit > base)
941 		size = (limit - base + 1);
942 	else
943 		size = 0;
944 	if (pa->pa_memex && base > 0 && size > 0) {
945 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
946 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
947 			    bus, dev, func, base, size);
948 			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
949 		}
950 	}
951 
952 	/* Figure out the prefetchable memory address range of the bridge. */
953 	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
954 	base = (blr & 0x0000fff0) << 16;
955 	limit = (blr & 0xfff00000) | 0x000fffff;
956 #ifdef __LP64__
957 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32);
958 	base |= ((uint64_t)blr) << 32;
959 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32);
960 	limit |= ((uint64_t)blr) << 32;
961 #endif
962 	if (limit > base)
963 		size = (limit - base + 1);
964 	else
965 		size = 0;
966 	if (pa->pa_pmemex && base > 0 && size > 0) {
967 		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
968 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
969 			    bus, dev, func, base, size);
970 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
971 		}
972 	} else if (pa->pa_memex && base > 0 && size > 0) {
973 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
974 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
975 			    bus, dev, func, base, size);
976 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
977 		}
978 	}
979 
980 	/* Figure out the bus range handled by the bridge. */
981 	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
982 	sec = PPB_BUSINFO_SECONDARY(bir);
983 	sub = PPB_BUSINFO_SUBORDINATE(bir);
984 	if (pa->pa_busex && sub >= sec && sub > 0) {
985 		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
986 		    EX_NOWAIT)) {
987 			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
988 			    bus, dev, func, sec, sub);
989 		}
990 	}
991 
992 	return (0);
993 }
994 
995 /*
996  * Vital Product Data (PCI 2.2)
997  */
998 
999 int
1000 pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
1001     pcireg_t *data)
1002 {
1003 	uint32_t reg;
1004 	int ofs, i, j;
1005 
1006 	KASSERT(data != NULL);
1007 	KASSERT((offset + count) < 0x7fff);
1008 
1009 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
1010 		return (1);
1011 
1012 	for (i = 0; i < count; offset += sizeof(*data), i++) {
1013 		reg &= 0x0000ffff;
1014 		reg &= ~PCI_VPD_OPFLAG;
1015 		reg |= PCI_VPD_ADDRESS(offset);
1016 		pci_conf_write(pc, tag, ofs, reg);
1017 
1018 		/*
1019 		 * PCI 2.2 does not specify how long we should poll
1020 		 * for completion nor whether the operation can fail.
1021 		 */
1022 		j = 0;
1023 		do {
1024 			if (j++ == 20)
1025 				return (1);
1026 			delay(4);
1027 			reg = pci_conf_read(pc, tag, ofs);
1028 		} while ((reg & PCI_VPD_OPFLAG) == 0);
1029 		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
1030 	}
1031 
1032 	return (0);
1033 }
1034 
1035 int
1036 pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
1037     pcireg_t *data)
1038 {
1039 	pcireg_t reg;
1040 	int ofs, i, j;
1041 
1042 	KASSERT(data != NULL);
1043 	KASSERT((offset + count) < 0x7fff);
1044 
1045 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
1046 		return (1);
1047 
1048 	for (i = 0; i < count; offset += sizeof(*data), i++) {
1049 		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);
1050 
1051 		reg &= 0x0000ffff;
1052 		reg |= PCI_VPD_OPFLAG;
1053 		reg |= PCI_VPD_ADDRESS(offset);
1054 		pci_conf_write(pc, tag, ofs, reg);
1055 
1056 		/*
1057 		 * PCI 2.2 does not specify how long we should poll
1058 		 * for completion nor whether the operation can fail.
1059 		 */
1060 		j = 0;
1061 		do {
1062 			if (j++ == 20)
1063 				return (1);
1064 			delay(1);
1065 			reg = pci_conf_read(pc, tag, ofs);
1066 		} while (reg & PCI_VPD_OPFLAG);
1067 	}
1068 
1069 	return (0);
1070 }
1071 
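/*
 * Helper for driver match routines: return non-zero if the vendor and
 * product of the device described by `pa' appear in the `ids' table.
 *
 * Typical usage in a driver's match function (sketch, with hypothetical
 * IDs):
 *
 *	static const struct pci_matchid foo_devices[] = {
 *		{ PCI_VENDOR_FOO, PCI_PRODUCT_FOO_BAR },
 *	};
 *
 *	return (pci_matchbyid(pa, foo_devices, nitems(foo_devices)));
 */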
1072 int
1073 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
1074     int nent)
1075 {
1076 	const struct pci_matchid *pm;
1077 	int i;
1078 
1079 	for (i = 0, pm = ids; i < nent; i++, pm++)
1080 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
1081 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
1082 			return (1);
1083 	return (0);
1084 }
1085 
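/*
 * Clear the VGA decode flag of the pci_dev that corresponds to `dev',
 * walking up the device tree until the pci(4) parent is found, so the
 * device is no longer reported as decoding legacy VGA cycles by
 * PCIOCGETVGA.
 */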
1086 void
1087 pci_disable_legacy_vga(struct device *dev)
1088 {
1089 	struct pci_softc *pci;
1090 	struct pci_dev *pd;
1091 
1092 	/* XXX Until we attach the drm drivers directly to pci. */
1093 	while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
1094 		dev = dev->dv_parent;
1095 
1096 	pci = (struct pci_softc *)dev->dv_parent;
1097 	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1098 		if (pd->pd_dev == dev) {
1099 			pd->pd_vga_decode = 0;
1100 			break;
1101 		}
1102 	}
1103 }
1104 
1105 #ifdef USER_PCICONF
1106 /*
1107  * This is the user interface to PCI configuration space.
1108  */
1109 
1110 #include <sys/pciio.h>
1111 #include <sys/fcntl.h>
1112 
1113 #ifdef DEBUG
1114 #define PCIDEBUG(x) printf x
1115 #else
1116 #define PCIDEBUG(x)
1117 #endif
1118 
1119 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
1120 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
1121 void pci_route_vga(struct pci_softc *);
1122 void pci_unroute_vga(struct pci_softc *);
1123 
1124 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
1125 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
1126 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
1127 
1128 int
1129 pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
1130 {
1131 	PCIDEBUG(("pciopen ndevs: %d\n", pci_cd.cd_ndevs));
1132 
1133 	if (minor(dev) >= pci_ndomains) {
1134 		return ENXIO;
1135 	}
1136 
1137 #ifndef APERTURE
1138 	if ((oflags & FWRITE) && securelevel > 0) {
1139 		return EPERM;
1140 	}
1141 #else
1142 	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
1143 		return EPERM;
1144 	}
1145 #endif
1146 	return (0);
1147 }
1148 
1149 int
1150 pciclose(dev_t dev, int flag, int devtype, struct proc *p)
1151 {
1152 	PCIDEBUG(("pciclose\n"));
1153 
1154 	pci_vga_proc = NULL;
1155 	return (0);
1156 }
1157 
1158 int
1159 pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1160 {
1161 	struct pcisel *sel = (struct pcisel *)data;
1162 	struct pci_io *io;
1163 	struct pci_rom *rom;
1164 	int i, error;
1165 	pcitag_t tag;
1166 	struct pci_softc *pci;
1167 	pci_chipset_tag_t pc;
1168 
1169 	switch (cmd) {
1170 	case PCIOCREAD:
1171 	case PCIOCREADMASK:
1172 		break;
1173 	case PCIOCWRITE:
1174 		if (!(flag & FWRITE))
1175 			return EPERM;
1176 		break;
1177 	case PCIOCGETROMLEN:
1178 	case PCIOCGETROM:
1179 		break;
1180 	case PCIOCGETVGA:
1181 	case PCIOCSETVGA:
1182 		if (pci_vga_pci == NULL)
1183 			return EINVAL;
1184 		break;
1185 	default:
1186 		return ENOTTY;
1187 	}
1188 
1189 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
1190 		pci = pci_cd.cd_devs[i];
1191 		if (pci != NULL && pci->sc_domain == minor(dev) &&
1192 		    pci->sc_bus == sel->pc_bus)
1193 			break;
1194 	}
1195 	if (i >= pci_cd.cd_ndevs)
1196 		return ENXIO;
1197 
1198 	/* Check bounds */
1199 	if (pci->sc_bus >= 256 ||
1200 	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
1201 	    sel->pc_func >= 8)
1202 		return EINVAL;
1203 
1204 	pc = pci->sc_pc;
1205 	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);
1206 
1207 	switch (cmd) {
1208 	case PCIOCREAD:
1209 		io = (struct pci_io *)data;
1210 		switch (io->pi_width) {
1211 		case 4:
1212 			/* Configuration space bounds check */
1213 			if (io->pi_reg < 0 ||
1214 			    io->pi_reg >= pci_conf_size(pc, tag))
1215 				return EINVAL;
1216 			/* Make sure the register is properly aligned */
1217 			if (io->pi_reg & 0x3)
1218 				return EINVAL;
1219 			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
1220 			error = 0;
1221 			break;
1222 		default:
1223 			error = EINVAL;
1224 			break;
1225 		}
1226 		break;
1227 
1228 	case PCIOCWRITE:
1229 		io = (struct pci_io *)data;
1230 		switch (io->pi_width) {
1231 		case 4:
1232 			/* Configuration space bounds check */
1233 			if (io->pi_reg < 0 ||
1234 			    io->pi_reg >= pci_conf_size(pc, tag))
1235 				return EINVAL;
1236 			/* Make sure the register is properly aligned */
1237 			if (io->pi_reg & 0x3)
1238 				return EINVAL;
1239 			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
1240 			error = 0;
1241 			break;
1242 		default:
1243 			error = EINVAL;
1244 			break;
1245 		}
1246 		break;
1247 
1248 	case PCIOCREADMASK:
1249 	{
1250 		io = (struct pci_io *)data;
1251 		struct pci_dev *pd;
1252 		int dev, func, i;
1253 
1254 		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
1255 		    io->pi_reg < PCI_MAPREG_START ||
1256 		    io->pi_reg >= PCI_MAPREG_END)
1257 			return (EINVAL);
1258 
1259 		error = ENODEV;
1260 		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1261 			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
1262 			if (dev == sel->pc_dev && func == sel->pc_func) {
1263 				i = (io->pi_reg - PCI_MAPREG_START) / 4;
1264 				io->pi_data = pd->pd_mask[i];
1265 				error = 0;
1266 				break;
1267 			}
1268 		}
1269 		break;
1270 	}
1271 
1272 	case PCIOCGETROMLEN:
1273 	case PCIOCGETROM:
1274 	{
1275 		pcireg_t addr, mask, bhlc;
1276 		bus_space_handle_t h;
1277 		bus_size_t len, off;
1278 		char buf[256];
1279 		int s;
1280 
1281 		rom = (struct pci_rom *)data;
1282 
1283 		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
1284 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
1285 			return (ENODEV);
1286 
1287 		s = splhigh();
1288 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
1289 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
1290 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
1291 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1292 		splx(s);
1293 
1294 		/*
1295 		 * Section 6.2.5.2, `Expansion ROM Base Address Register',
1296 		 * tells us that only the upper 21 bits are writable.
1298 		 * This means that the size of a ROM must be a
1299 		 * multiple of 2 KB.  So reading the ROM in chunks of
1300 		 * 256 bytes should work just fine.
1301 		 */
1302 		if (PCI_ROM_ADDR(addr) == 0 ||
1303 		    PCI_ROM_SIZE(mask) % sizeof(buf) != 0)
1304 			return (ENODEV);
1305 
1306 		/* If we're just after the size, skip reading the ROM. */
1307 		if (cmd == PCIOCGETROMLEN) {
1308 			error = 0;
1309 			goto fail;
1310 		}
1311 
1312 		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
1313 			error = ENOMEM;
1314 			goto fail;
1315 		}
1316 
1317 		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
1318 		    PCI_ROM_SIZE(mask), 0, &h);
1319 		if (error)
1320 			goto fail;
1321 
1322 		off = 0;
1323 		len = PCI_ROM_SIZE(mask);
1324 		while (len > 0 && error == 0) {
1325 			s = splhigh();
1326 			pci_conf_write(pc, tag, PCI_ROM_REG,
1327 			    addr | PCI_ROM_ENABLE);
1328 			bus_space_read_region_1(pci->sc_memt, h, off,
1329 			    buf, sizeof(buf));
1330 			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1331 			splx(s);
1332 
1333 			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
1334 			off += sizeof(buf);
1335 			len -= sizeof(buf);
1336 		}
1337 
1338 		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));
1339 
1340 	fail:
1341 		rom->pr_romlen = PCI_ROM_SIZE(mask);
1342 		break;
1343 	}
1344 
1345 	case PCIOCGETVGA:
1346 	{
1347 		struct pci_vga *vga = (struct pci_vga *)data;
1348 		struct pci_dev *pd;
1349 		int bus, dev, func;
1350 
1351 		vga->pv_decode = 0;
1352 		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1353 			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
1354 			if (dev == sel->pc_dev && func == sel->pc_func) {
1355 				if (pd->pd_vga_decode)
1356 					vga->pv_decode = PCI_VGA_IO_ENABLE |
1357 					    PCI_VGA_MEM_ENABLE;
1358 				break;
1359 			}
1360 		}
1361 
1362 		pci_decompose_tag(pci_vga_pci->sc_pc,
1363 		    pci_vga_tag, &bus, &dev, &func);
1364 		vga->pv_sel.pc_bus = bus;
1365 		vga->pv_sel.pc_dev = dev;
1366 		vga->pv_sel.pc_func = func;
1367 		error = 0;
1368 		break;
1369 	}
1370 	case PCIOCSETVGA:
1371 	{
1372 		struct pci_vga *vga = (struct pci_vga *)data;
1373 		int bus, dev, func;
1374 
1375 		switch (vga->pv_lock) {
1376 		case PCI_VGA_UNLOCK:
1377 		case PCI_VGA_LOCK:
1378 		case PCI_VGA_TRYLOCK:
1379 			break;
1380 		default:
1381 			return (EINVAL);
1382 		}
1383 
1384 		if (vga->pv_lock == PCI_VGA_UNLOCK) {
1385 			if (pci_vga_proc != p)
1386 				return (EINVAL);
1387 			pci_vga_proc = NULL;
1388 			wakeup(&pci_vga_proc);
1389 			return (0);
1390 		}
1391 
1392 		while (pci_vga_proc != p && pci_vga_proc != NULL) {
1393 			if (vga->pv_lock == PCI_VGA_TRYLOCK)
1394 				return (EBUSY);
1395 			error = tsleep(&pci_vga_proc, PLOCK | PCATCH,
1396 			    "vgalk", 0);
1397 			if (error)
1398 				return (error);
1399 		}
1400 		pci_vga_proc = p;
1401 
1402 		pci_decompose_tag(pci_vga_pci->sc_pc,
1403 		    pci_vga_tag, &bus, &dev, &func);
1404 		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
1405 		    func != vga->pv_sel.pc_func) {
1406 			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
1407 			if (pci != pci_vga_pci) {
1408 				pci_unroute_vga(pci_vga_pci);
1409 				pci_route_vga(pci);
1410 				pci_vga_pci = pci;
1411 			}
1412 			pci_enable_vga(pc, tag);
1413 			pci_vga_tag = tag;
1414 		}
1415 
1416 		error = 0;
1417 		break;
1418 	}
1419 
1420 	default:
1421 		error = ENOTTY;
1422 		break;
1423 	}
1424 
1425 	return (error);
1426 }
1427 
1428 void
1429 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1430 {
1431 	pcireg_t csr;
1432 
1433 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1434 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1435 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1436 }
1437 
1438 void
1439 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1440 {
1441 	pcireg_t csr;
1442 
1443 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1444 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1445 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1446 }
1447 
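/*
 * pci_route_vga()/pci_unroute_vga(): set or clear the VGA Enable bit in
 * the bridge control register of every bridge between this bus and the
 * host, so that legacy VGA cycles are forwarded to (or no longer reach)
 * the selected bus.
 */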
1448 void
1449 pci_route_vga(struct pci_softc *sc)
1450 {
1451 	pci_chipset_tag_t pc = sc->sc_pc;
1452 	pcireg_t bc;
1453 
1454 	if (sc->sc_bridgetag == NULL)
1455 		return;
1456 
1457 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1458 	bc |= PPB_BC_VGA_ENABLE;
1459 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1460 
1461 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1462 }
1463 
1464 void
1465 pci_unroute_vga(struct pci_softc *sc)
1466 {
1467 	pci_chipset_tag_t pc = sc->sc_pc;
1468 	pcireg_t bc;
1469 
1470 	if (sc->sc_bridgetag == NULL)
1471 		return;
1472 
1473 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1474 	bc &= ~PPB_BC_VGA_ENABLE;
1475 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1476 
1477 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1478 }
1479 #endif /* USER_PCICONF */
1480 
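/*
 * Enumeration callback used at attach time to find the VGA adapter the
 * firmware left active: a VGA-class device with both I/O and memory
 * decoding enabled.  Its tag is remembered in pci_vga_tag.
 */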
1481 int
1482 pci_primary_vga(struct pci_attach_args *pa)
1483 {
1484 	/* XXX For now, only handle the first PCI domain. */
1485 	if (pa->pa_domain != 0)
1486 		return (0);
1487 
1488 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1489 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1490 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1491 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1492 		return (0);
1493 
1494 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1495 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1496 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1497 		return (0);
1498 
1499 	pci_vga_tag = pa->pa_tag;
1500 
1501 	return (1);
1502 }
1503