xref: /openbsd/sys/dev/pci/pci.c (revision 891d7ab6)
1 /*	$OpenBSD: pci.c,v 1.93 2011/06/12 11:13:28 kettenis Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/proc.h>
43 
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 #include <dev/pci/ppbreg.h>
48 
49 int pcimatch(struct device *, void *, void *);
50 void pciattach(struct device *, struct device *, void *);
51 int pcidetach(struct device *, int);
52 int pciactivate(struct device *, int);
53 void pci_suspend(struct pci_softc *);
54 void pci_resume(struct pci_softc *);
55 
/* Number of 32-bit base address registers in a type 0 config header. */
#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
				    sizeof(pcireg_t))
/*
 * Per-device bookkeeping kept by the bus driver: the configuration
 * registers saved by pci_suspend() and restored by pci_resume(),
 * plus the BAR size masks probed at attach time.
 */
struct pci_dev {
	LIST_ENTRY(pci_dev) pd_next;	/* entry in softc's sc_devs list */
	pcitag_t pd_tag;        /* pci register tag */
	pcireg_t pd_csr;	/* saved command/status register */
	pcireg_t pd_bhlc;	/* saved BIST/header/latency/cacheline reg */
	pcireg_t pd_int;	/* saved interrupt pin/line register */
	pcireg_t pd_map[NMAPREG];	/* saved base address registers */
	pcireg_t pd_mask[NMAPREG];	/* implemented-bits mask per BAR,
					   probed in pci_probe_device() */
	pcireg_t pd_msi_mc;	/* saved MSI message control */
	pcireg_t pd_msi_ma;	/* saved MSI message address */
	pcireg_t pd_msi_mau32;	/* saved MSI message address, upper 32 bits */
	pcireg_t pd_msi_md;	/* saved MSI message data */
	int pd_pmcsr_state;	/* power state before suspend */
};
72 
73 #ifdef APERTURE
74 extern int allowaperture;
75 #endif
76 
/* Autoconfiguration glue: entry points for the "pci" bus driver. */
struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};
84 
int	pci_ndomains;		/* number of PCI domains; bounds the
				   character device minors in pciopen() */

struct proc *pci_vga_proc;	/* NOTE(review): appears to track the process
				   that claimed VGA (cleared in pciclose());
				   confirm against the PCIOCSETVGA handler */
struct pci_softc *pci_vga_pci;	/* bus holding the primary VGA device
				   (set in pciattach()) */
pcitag_t pci_vga_tag;		/* presumably the primary VGA device's tag;
				   not assigned in this part of the file */
int	pci_vga_count;		/* VGA devices counted during enumeration */

int	pci_dopm;		/* nonzero: drop devices to D3 on suspend
				   (see pci_suspend()/pci_resume()) */
93 
94 int	pciprint(void *, const char *);
95 int	pcisubmatch(struct device *, void *, void *);
96 
97 #ifdef PCI_MACHDEP_ENUMERATE_BUS
98 #define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
99 #else
100 int pci_enumerate_bus(struct pci_softc *,
101     int (*)(struct pci_attach_args *), struct pci_attach_args *);
102 #endif
103 int	pci_reserve_resources(struct pci_attach_args *);
104 int	pci_count_vga(struct pci_attach_args *);
105 int	pci_primary_vga(struct pci_attach_args *);
106 
107 /*
108  * Important note about PCI-ISA bridges:
109  *
110  * Callbacks are used to configure these devices so that ISA/EISA bridges
111  * can attach their child busses after PCI configuration is done.
112  *
113  * This works because:
114  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
115  *	(2) any ISA/EISA bridges must be attached to primary PCI
116  *	    busses (i.e. bus zero).
117  *
118  * That boils down to: there can only be one of these outstanding
119  * at a time, it is cleared when configuring PCI bus 0 before any
120  * subdevices have been found, and it is run after all subdevices
121  * of PCI bus 0 have been found.
122  *
123  * This is needed because there are some (legacy) PCI devices which
124  * can show up as ISA/EISA devices as well (the prime example of which
125  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
126  * and the bridge is seen before the video board is, the board can show
127  * up as an ISA device, and that can (bogusly) complicate the PCI device's
128  * attach code, or make the PCI device not be properly attached at all.
129  *
130  * We use the generic config_defer() facility to achieve this.
131  */
132 
133 int
134 pcimatch(struct device *parent, void *match, void *aux)
135 {
136 	struct cfdata *cf = match;
137 	struct pcibus_attach_args *pba = aux;
138 
139 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
140 		return (0);
141 
142 	/* Check the locators */
143 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
144 	    cf->pcibuscf_bus != pba->pba_bus)
145 		return (0);
146 
147 	/* sanity */
148 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
149 		return (0);
150 
151 	/*
152 	 * XXX check other (hardware?) indicators
153 	 */
154 
155 	return (1);
156 }
157 
/*
 * Attach routine for the pci bus: copy the bus description from the
 * attach args into the softc, then enumerate the bus several times
 * with different per-device callbacks before the final attach pass.
 */
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	/* Bus-specific setup hook before we touch the bus. */
	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);

	/* Mirror the attach arguments into the softc. */
	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_flags = pba->pba_flags;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	/*
	 * Four passes: reserve firmware-assigned resources, count VGA
	 * devices, locate the primary VGA device, and finally (match
	 * callback NULL) attach drivers to all functions.
	 */
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
	pci_enumerate_bus(sc, pci_count_vga, NULL);
	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
		pci_vga_pci = sc;
	pci_enumerate_bus(sc, NULL, NULL);
}
191 
/* Detach routine for the pci bus: tear down children and our state. */
int
pcidetach(struct device *self, int flags)
{
	struct pci_softc *sc = (struct pci_softc *)self;

	return (pci_detach_devices(sc, flags));
}
197 
198 int
199 pciactivate(struct device *self, int act)
200 {
201 	int rv = 0;
202 
203 	switch (act) {
204 	case DVACT_QUIESCE:
205 		rv = config_activate_children(self, act);
206 		break;
207 	case DVACT_SUSPEND:
208 		rv = config_activate_children(self, act);
209 		pci_suspend((struct pci_softc *)self);
210 		break;
211 	case DVACT_RESUME:
212 		pci_resume((struct pci_softc *)self);
213 		rv = config_activate_children(self, act);
214 		break;
215 	}
216 	return (rv);
217 }
218 
/*
 * DVACT_SUSPEND helper: save the configuration registers of every
 * header type 0 device on this bus into its struct pci_dev so that
 * pci_resume() can restore them; optionally (pci_dopm) place each
 * device into D3.
 */
void
pci_suspend(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Save registers that may get lost. */
		for (i = 0; i < NMAPREG; i++)
			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4));
		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_BHLC_REG);
		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_INTERRUPT_REG);

		/* Save the MSI registers too, if the capability exists. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA);
			/* 64-bit MSI has an extra address word and the
			 * data register at a different offset. */
			if (reg & PCI_MSI_MC_C64) {
				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MAU32);
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD64);
			} else {
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD32);
			}
			pd->pd_msi_mc = reg;
		}

		if (pci_dopm) {
			/* Place the device into D3. */
			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
			    pd->pd_tag);
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    PCI_PMCSR_STATE_D3);
		}
	}
}
272 
/*
 * DVACT_RESUME helper: restore the configuration registers saved by
 * pci_suspend() for every header type 0 device on this bus, after
 * first restoring the device's power state (if pci_dopm put it in D3).
 */
void
pci_resume(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		if (pci_dopm) {
			/* Restore power. */
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pd->pd_pmcsr_state);
		}

		/* Restore the registers saved above. */
		for (i = 0; i < NMAPREG; i++)
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
		/* Keep the live status bits (upper half); restore only
		 * the command half of the command/status register. */
		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
		    pd->pd_bhlc);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
		    pd->pd_int);

		/* Put the MSI registers back, if the capability exists. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA, pd->pd_msi_ma);
			if (reg & PCI_MSI_MC_C64) {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD64, pd->pd_msi_md);
			} else {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD32, pd->pd_msi_md);
			}
			/* Message control last: re-enables MSI state. */
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MC, pd->pd_msi_mc);
		}
	}
}
327 
328 int
329 pciprint(void *aux, const char *pnp)
330 {
331 	struct pci_attach_args *pa = aux;
332 	char devinfo[256];
333 
334 	if (pnp) {
335 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
336 		    sizeof devinfo);
337 		printf("%s at %s", devinfo, pnp);
338 	}
339 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
340 	if (!pnp) {
341 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
342 		    sizeof devinfo);
343 		printf(" %s", devinfo);
344 	}
345 
346 	return (UNCONF);
347 }
348 
349 int
350 pcisubmatch(struct device *parent, void *match,  void *aux)
351 {
352 	struct cfdata *cf = match;
353 	struct pci_attach_args *pa = aux;
354 
355 	if (cf->pcicf_dev != PCI_UNK_DEV &&
356 	    cf->pcicf_dev != pa->pa_device)
357 		return (0);
358 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
359 	    cf->pcicf_function != pa->pa_function)
360 		return (0);
361 
362 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
363 }
364 
/*
 * Probe a single bus/device/function.  When a `match' callback is
 * given, only build the attach args, run the callback, and return its
 * result (copying the args to *pap on a hit).  When it is NULL,
 * record the device in sc_devs, probe its BAR size masks, and attach
 * a driver via config_found_sm().
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	struct device *dev;
	pcireg_t id, class, intr, bhlcr, cap;
	int pin, bus, device, function;
	int off, ret = 0;
	uint64_t addr;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* Ignore functions with an unknown header type. */
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	/* Describe the function for match/attach routines. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = sc->sc_flags;
	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	if (sc->sc_bridgetag == NULL) {
		/* Root bus: no interrupt swizzling accumulated yet. */
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		/* Behind a bridge: add our device number to the
		 * swizzle accumulated by the parent busses. */
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	/* HyperTransport MSI mapping: mark MSI usable only when the
	 * mapping is enabled and uses the canonical fixed address. */
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI32) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR)
				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
		}
	}

	if (match != NULL) {
		/* Caller-supplied probe: report, do not attach. */
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		pcireg_t address, csr;
		int i, reg, reg_start, reg_end;
		int s;

		/* Remember the device for suspend/resume and ioctls. */
		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
		pd->pd_tag = tag;
		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

		/* The number of BARs depends on the header type. */
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			break;
		case 1: /* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			break;
		case 2: /* PCI-CardBus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			break;
		default:
			/* Cannot happen: header type was checked above. */
			return (0);
		}

		/*
		 * Probe each BAR's size mask by writing all-ones and
		 * reading back the implemented bits.  I/O and memory
		 * decoding are disabled and interrupts blocked while
		 * the BARs temporarily hold bogus values.
		 */
		s = splhigh();
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
			address = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, 0xffffffff);
			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, address);
		}

		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
		splx(s);

		/* Attach a matching driver, if any. */
		if ((dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch)))
			pci_dev_postattach(dev, &pa);
	}

	return (ret);
}
519 
520 int
521 pci_detach_devices(struct pci_softc *sc, int flags)
522 {
523 	struct pci_dev *pd, *next;
524 	int ret;
525 
526 	ret = config_detach_children(&sc->sc_dev, flags);
527 	if (ret != 0)
528 		return (ret);
529 
530 	for (pd = LIST_FIRST(&sc->sc_devs);
531 	     pd != LIST_END(&sc->sc_devs); pd = next) {
532 		next = LIST_NEXT(pd, pd_next);
533 		free(pd, M_DEVBUF);
534 	}
535 	LIST_INIT(&sc->sc_devs);
536 
537 	return (0);
538 }
539 
540 int
541 pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
542     int *offset, pcireg_t *value)
543 {
544 	pcireg_t reg;
545 	unsigned int ofs;
546 
547 	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
548 	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
549 		return (0);
550 
551 	/* Determine the Capability List Pointer register to start with. */
552 	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
553 	switch (PCI_HDRTYPE_TYPE(reg)) {
554 	case 0:	/* standard device header */
555 	case 1: /* PCI-PCI bridge header */
556 		ofs = PCI_CAPLISTPTR_REG;
557 		break;
558 	case 2:	/* PCI-CardBus bridge header */
559 		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
560 		break;
561 	default:
562 		return (0);
563 	}
564 
565 	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
566 	while (ofs != 0) {
567 		/*
568 		 * Some devices, like parts of the NVIDIA C51 chipset,
569 		 * have a broken Capabilities List.  So we need to do
570 		 * a sanity check here.
571 		 */
572 		if ((ofs & 3) || (ofs < 0x40))
573 			return (0);
574 		reg = pci_conf_read(pc, tag, ofs);
575 		if (PCI_CAPLIST_CAP(reg) == capid) {
576 			if (offset)
577 				*offset = ofs;
578 			if (value)
579 				*value = reg;
580 			return (1);
581 		}
582 		ofs = PCI_CAPLIST_NEXT(reg);
583 	}
584 
585 	return (0);
586 }
587 
588 int
589 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
590     int *offset, pcireg_t *value)
591 {
592 	pcireg_t reg;
593 	unsigned int ofs;
594 
595 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
596 		return (0);
597 
598 	while (ofs != 0) {
599 #ifdef DIAGNOSTIC
600 		if ((ofs & 3) || (ofs < 0x40))
601 			panic("pci_get_ht_capability");
602 #endif
603 		reg = pci_conf_read(pc, tag, ofs);
604 		if (PCI_HT_CAP(reg) == capid) {
605 			if (offset)
606 				*offset = ofs;
607 			if (value)
608 				*value = reg;
609 			return (1);
610 		}
611 		ofs = PCI_CAPLIST_NEXT(reg);
612 	}
613 
614 	return (0);
615 }
616 
617 int
618 pci_find_device(struct pci_attach_args *pa,
619     int (*match)(struct pci_attach_args *))
620 {
621 	extern struct cfdriver pci_cd;
622 	struct device *pcidev;
623 	int i;
624 
625 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
626 		pcidev = pci_cd.cd_devs[i];
627 		if (pcidev != NULL &&
628 		    pci_enumerate_bus((struct pci_softc *)pcidev,
629 		    		      match, pa) != 0)
630 			return (1);
631 	}
632 	return (0);
633 }
634 
635 int
636 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
637 {
638 	pcireg_t reg;
639 	int offset;
640 
641 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
642 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
643 		return (reg & PCI_PMCSR_STATE_MASK);
644 	}
645 	return (PCI_PMCSR_STATE_D0);
646 }
647 
648 int
649 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
650 {
651 	pcireg_t reg;
652 	int offset;
653 
654 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
655 		if (state == PCI_PMCSR_STATE_D3) {
656 			/*
657 			 * The PCI Power Management spec says we
658 			 * should disable I/O and memory space as well
659 			 * as bus mastering before we place the device
660 			 * into D3.
661 			 */
662 			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
663 			reg &= ~PCI_COMMAND_IO_ENABLE;
664 			reg &= ~PCI_COMMAND_MEM_ENABLE;
665 			reg &= ~PCI_COMMAND_MASTER_ENABLE;
666 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
667 		}
668 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
669 		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
670 			int ostate = reg & PCI_PMCSR_STATE_MASK;
671 
672 			pci_conf_write(pc, tag, offset + PCI_PMCSR,
673 			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
674 			if (state == PCI_PMCSR_STATE_D3 ||
675 			    ostate == PCI_PMCSR_STATE_D3)
676 				delay(10 * 1000);
677 			return (ostate);
678 		}
679 	}
680 	return (state);
681 }
682 
683 #ifndef PCI_MACHDEP_ENUMERATE_BUS
684 /*
685  * Generic PCI bus enumeration routine.  Used unless machine-dependent
686  * code needs to provide something else.
687  */
/*
 * Enumerate every device/function on this bus, calling
 * pci_probe_device() for each one.  With a non-NULL `match' callback,
 * stop and return the callback's result at the first hit; otherwise
 * probe-and-attach everything and return 0.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		/* Look at function 0 first to decide what's here. */
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		/* Skip devices with an unknown header type. */
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/*
		 * How many functions to probe: a quirk-table entry
		 * overrides the multifunction bit in the header.
		 */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			/* In match mode, stop at the first hit. */
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
735 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
736 
737 int
738 pci_reserve_resources(struct pci_attach_args *pa)
739 {
740 	pci_chipset_tag_t pc = pa->pa_pc;
741 	pcitag_t tag = pa->pa_tag;
742 	pcireg_t bhlc, blr, type;
743 	bus_addr_t base, limit;
744 	bus_size_t size;
745 	int reg, reg_start, reg_end;
746 	int flags;
747 
748 	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
749 	switch (PCI_HDRTYPE_TYPE(bhlc)) {
750 	case 0:
751 		reg_start = PCI_MAPREG_START;
752 		reg_end = PCI_MAPREG_END;
753 		break;
754 	case 1: /* PCI-PCI bridge */
755 		reg_start = PCI_MAPREG_START;
756 		reg_end = PCI_MAPREG_PPB_END;
757 		break;
758 	case 2: /* PCI-CardBus bridge */
759 		reg_start = PCI_MAPREG_START;
760 		reg_end = PCI_MAPREG_PCB_END;
761 		break;
762 	default:
763 		return (0);
764 	}
765 
766 	for (reg = reg_start; reg < reg_end; reg += 4) {
767 		if (!pci_mapreg_probe(pc, tag, reg, &type))
768 			continue;
769 
770 		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
771 			continue;
772 
773 		if (base == 0)
774 			continue;
775 
776 		switch (type) {
777 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
778 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
779 #ifdef BUS_SPACE_MAP_PREFETCHABLE
780 			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
781 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
782 			    base, size, EX_NOWAIT) == 0) {
783 				break;
784 			}
785 #endif
786 			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
787 			    base, size, EX_NOWAIT)) {
788 				printf("mem address conflict 0x%x/0x%x\n",
789 				    base, size);
790 				pci_conf_write(pc, tag, reg, 0);
791 				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
792 					pci_conf_write(pc, tag, reg + 4, 0);
793 			}
794 			break;
795 		case PCI_MAPREG_TYPE_IO:
796 			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
797 			    base, size, EX_NOWAIT)) {
798 				printf("io address conflict 0x%x/0x%x\n",
799 				    base, size);
800 				pci_conf_write(pc, tag, reg, 0);
801 			}
802 			break;
803 		}
804 
805 		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
806 			reg += 4;
807 	}
808 
809 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
810 		return (0);
811 
812 	/* Figure out the I/O address range of the bridge. */
813 	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
814 	base = (blr & 0x000000f0) << 8;
815 	limit = (blr & 0x000f000) | 0x00000fff;
816 	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
817 	base |= (blr & 0x0000ffff) << 16;
818 	limit |= (blr & 0xffff0000);
819 	if (limit > base)
820 		size = (limit - base + 1);
821 	else
822 		size = 0;
823 	if (pa->pa_ioex && base > 0 && size > 0) {
824 		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
825 			printf("bridge io address conflict 0x%x/0x%x\n",
826 			       base, size);
827 			blr &= 0xffff0000;
828 			blr |= 0x000000f0;
829 			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
830 		}
831 	}
832 
833 	/* Figure out the memory mapped I/O address range of the bridge. */
834 	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
835 	base = (blr & 0x0000fff0) << 16;
836 	limit = (blr & 0xfff00000) | 0x000fffff;
837 	if (limit > base)
838 		size = (limit - base + 1);
839 	else
840 		size = 0;
841 	if (pa->pa_memex && base > 0 && size > 0) {
842 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
843 			printf("bridge mem address conflict 0x%x/0x%x\n",
844 			       base, size);
845 			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
846 		}
847 	}
848 
849 	/* Figure out the prefetchable memory address range of the bridge. */
850 	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
851 	base = (blr & 0x0000fff0) << 16;
852 	limit = (blr & 0xfff00000) | 0x000fffff;
853 	if (limit > base)
854 		size = (limit - base + 1);
855 	else
856 		size = 0;
857 	if (pa->pa_pmemex && base > 0 && size > 0) {
858 		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
859 			printf("bridge mem address conflict 0x%x/0x%x\n",
860 			       base, size);
861 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
862 		}
863 	} else if (pa->pa_memex && base > 0 && size > 0) {
864 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
865 			printf("bridge mem address conflict 0x%x/0x%x\n",
866 			       base, size);
867 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
868 		}
869 	}
870 
871 	return (0);
872 }
873 
874 /*
875  * Vital Product Data (PCI 2.2)
876  */
877 
/*
 * Read `count' 32-bit words of Vital Product Data (PCI 2.2) starting
 * at VPD address `offset' into `data'.  Returns 0 on success, 1 on
 * failure (no VPD capability or a read timed out).
 */
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	/*
	 * VPD addresses are 15 bits.  NOTE(review): the loop below
	 * advances `offset' by 4 per word, so this bound looks too
	 * lax for count > 1 — confirm against PCI 2.2.
	 */
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Set the address with OPFLAG clear to request a read. */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);	/* give up after ~80us */
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);	/* set when done */
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
913 
/*
 * Write `count' 32-bit words from `data' to the Vital Product Data
 * (PCI 2.2) area starting at VPD address `offset'.  Returns 0 on
 * success, 1 on failure (no VPD capability or a write timed out).
 */
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	/*
	 * NOTE(review): same questionable bound as pci_vpd_read() —
	 * the loop strides `offset' by 4 per word.
	 */
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Load the data register, then issue the write request. */
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;	/* OPFLAG set requests a write */
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);	/* give up after ~20us */
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);	/* clears when done */
	}

	return (0);
}
950 
951 int
952 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
953     int nent)
954 {
955 	const struct pci_matchid *pm;
956 	int i;
957 
958 	for (i = 0, pm = ids; i < nent; i++, pm++)
959 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
960 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
961 			return (1);
962 	return (0);
963 }
964 
965 #ifdef USER_PCICONF
966 /*
967  * This is the user interface to PCI configuration space.
968  */
969 
970 #include <sys/pciio.h>
971 #include <sys/fcntl.h>
972 
973 #ifdef DEBUG
974 #define PCIDEBUG(x) printf x
975 #else
976 #define PCIDEBUG(x)
977 #endif
978 
979 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
980 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
981 void pci_route_vga(struct pci_softc *);
982 void pci_unroute_vga(struct pci_softc *);
983 
984 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
985 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
986 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
987 
/*
 * Open /dev/pci*.  The minor number selects the PCI domain.  Opening
 * for write is restricted once securelevel is raised, unless the
 * aperture driver is compiled in and allowaperture is set.
 */
int
pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));

	/* One minor per PCI domain. */
	if (minor(dev) >= pci_ndomains) {
		return ENXIO;
	}

#ifndef APERTURE
	/* No aperture support: writes forbidden at securelevel > 0. */
	if ((oflags & FWRITE) && securelevel > 0) {
		return EPERM;
	}
#else
	/* Writes allowed only when allowaperture was explicitly set. */
	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
		return EPERM;
	}
#endif
	return (0);
}
1008 
1009 int
1010 pciclose(dev_t dev, int flag, int devtype, struct proc *p)
1011 {
1012 	PCIDEBUG(("pciclose\n"));
1013 
1014 	pci_vga_proc = NULL;
1015 	return (0);
1016 }
1017 
/*
 * ioctl handler for the /dev/pci* devices: userland access to PCI
 * configuration space, BAR size masks, expansion ROMs and the VGA
 * arbitration lock.  `data' starts with a struct pcisel naming the
 * target bus/device/function; the minor number selects the domain.
 */
int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci;
	pci_chipset_tag_t pc;

	/* Permission/precondition checks that don't need the device. */
	switch (cmd) {
	case PCIOCREAD:
	case PCIOCREADMASK:
		break;
	case PCIOCWRITE:
		/* Config space writes require the fd opened for writing. */
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROMLEN:
	case PCIOCGETROM:
		break;
	case PCIOCGETVGA:
	case PCIOCSETVGA:
		/* VGA arbitration needs a known primary VGA device. */
		if (pci_vga_pci == NULL)
			return EINVAL;
		break;
	default:
		return ENOTTY;
	}

	/* Find the softc for the requested domain (minor) and bus. */
	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			/* Only 32-bit wide accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			/* Only 32-bit wide accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCREADMASK:
	{
		io = (struct pci_io *)data;
		struct pci_dev *pd;
		int dev, func, i;

		/* Only aligned 32-bit reads of BAR registers make sense. */
		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
		    io->pi_reg < PCI_MAPREG_START ||
		    io->pi_reg >= PCI_MAPREG_END)
			return (EINVAL);

		/*
		 * Return the BAR size mask that was probed and saved
		 * in pd_mask when the device was attached.
		 */
		error = ENODEV;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				i = (io->pi_reg - PCI_MAPREG_START) / 4;
				io->pi_data = pd->pd_mask[i];
				error = 0;
				break;
			}
		}
		break;
	}

	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		rom = (struct pci_rom *)data;

		/* Only header type 0 devices have an expansion ROM BAR. */
		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		/*
		 * Size the ROM by writing all-ones (with the enable bit
		 * clear) and reading back the mask, then restore the
		 * original value; splhigh() keeps the sequence atomic.
		 */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
		 *
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB.  So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 */
		/*
		 * NOTE(review): the parentheses below group as
		 * ((A || B) != 0), not the presumably intended
		 * (A || (B != 0)); since || already yields 0 or 1
		 * the two forms are equivalent, so behavior is
		 * unaffected.
		 */
		if ((PCI_ROM_ADDR(addr) == 0 ||
		     PCI_ROM_SIZE(mask) % sizeof(buf)) != 0)
			return (ENODEV);

		/* If we're just after the size, skip reading the ROM. */
		if (cmd == PCIOCGETROMLEN) {
			error = 0;
			goto fail;
		}

		/* The caller's buffer must hold the entire ROM. */
		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		/*
		 * Copy the ROM out in 256-byte chunks, enabling ROM
		 * decode only while each chunk is being read.
		 */
		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		/* Always report the probed ROM size, even on failure. */
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	case PCIOCGETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, device, function;

		/* Report the current primary VGA device's address. */
		pci_decompose_tag(pci_vga_pci->sc_pc, pci_vga_tag,
		    &bus, &device, &function);
		vga->pv_sel.pc_bus = bus;
		vga->pv_sel.pc_dev = device;
		vga->pv_sel.pc_func = function;
		error = 0;
		break;
	}
	case PCIOCSETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, device, function;

		switch (vga->pv_lock) {
		case PCI_VGA_UNLOCK:
		case PCI_VGA_LOCK:
		case PCI_VGA_TRYLOCK:
			break;
		default:
			return (EINVAL);
		}

		/* Unlock: only the current lock holder may release it. */
		if (vga->pv_lock == PCI_VGA_UNLOCK) {
			if (pci_vga_proc != p)
				return (EINVAL);
			pci_vga_proc = NULL;
			wakeup(&pci_vga_proc);
			return (0);
		}

		/*
		 * Acquire the arbitration lock; TRYLOCK fails instead
		 * of sleeping, and an interrupted sleep aborts.
		 */
		while (pci_vga_proc != p && pci_vga_proc != NULL) {
			if (vga->pv_lock == PCI_VGA_TRYLOCK)
				return (EBUSY);
			error = tsleep(&pci_vga_proc, PLOCK | PCATCH,
			    "vgalk", 0);
			if (error)
				return (error);
		}
		pci_vga_proc = p;

		/*
		 * If a different device was selected, move legacy VGA
		 * decoding: disable the old device, re-route bridges
		 * when the bus changed, then enable the new device.
		 */
		pci_decompose_tag(pci_vga_pci->sc_pc, pci_vga_tag,
		    &bus, &device, &function);
		if (bus != vga->pv_sel.pc_bus ||
		    device != vga->pv_sel.pc_dev ||
		    function != vga->pv_sel.pc_func) {
			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
			if (pci != pci_vga_pci) {
				pci_unroute_vga(pci_vga_pci);
				pci_route_vga(pci);
				pci_vga_pci = pci;
			}
			pci_enable_vga(pc, tag);
			pci_vga_tag = tag;
		}

		error = 0;
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
1276 
1277 void
1278 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1279 {
1280 	pcireg_t csr;
1281 
1282 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1283 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1284 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1285 }
1286 
1287 void
1288 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1289 {
1290 	pcireg_t csr;
1291 
1292 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1293 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1294 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1295 }
1296 
1297 void
1298 pci_route_vga(struct pci_softc *sc)
1299 {
1300 	pci_chipset_tag_t pc = sc->sc_pc;
1301 	pcireg_t bc;
1302 
1303 	if (sc->sc_bridgetag == NULL)
1304 		return;
1305 
1306 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1307 	bc |= PPB_BC_VGA_ENABLE;
1308 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1309 
1310 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1311 }
1312 
1313 void
1314 pci_unroute_vga(struct pci_softc *sc)
1315 {
1316 	pci_chipset_tag_t pc = sc->sc_pc;
1317 	pcireg_t bc;
1318 
1319 	if (sc->sc_bridgetag == NULL)
1320 		return;
1321 
1322 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1323 	bc &= ~PPB_BC_VGA_ENABLE;
1324 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1325 
1326 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1327 }
1328 #endif /* USER_PCICONF */
1329 
1330 int
1331 pci_count_vga(struct pci_attach_args *pa)
1332 {
1333 	/* XXX For now, only handle the first PCI domain. */
1334 	if (pa->pa_domain != 0)
1335 		return (0);
1336 
1337 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1338 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1339 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1340 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1341 		return (0);
1342 
1343 	pci_vga_count++;
1344 
1345 	return (0);
1346 }
1347 
1348 int
1349 pci_primary_vga(struct pci_attach_args *pa)
1350 {
1351 	/* XXX For now, only handle the first PCI domain. */
1352 	if (pa->pa_domain != 0)
1353 		return (0);
1354 
1355 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1356 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1357 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1358 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1359 		return (0);
1360 
1361 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1362 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1363 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1364 		return (0);
1365 
1366 	pci_vga_tag = pa->pa_tag;
1367 
1368 	return (1);
1369 }
1370