1 /*
2 * Copyright (c) 2008 Juan Romero Pardines
3 * Copyright (c) 2008 Mark Kettenis
4 * Copyright (c) 2009 Michael Lorenz
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #ifdef HAVE_CONFIG_H
20 #include "config.h"
21 #endif
22
23 #include <sys/param.h>
24 #include <sys/ioctl.h>
25 #include <sys/mman.h>
26 #include <sys/types.h>
27
28 #ifdef HAVE_MTRR
29 #include <machine/sysarch.h>
30 #include <machine/mtrr.h>
31 #ifdef _X86_SYSARCH_L
32 /* NetBSD 5.x and newer */
33 #define netbsd_set_mtrr(mr, num) _X86_SYSARCH_L(set_mtrr)(mr, num)
34 #else
35 /* NetBSD 4.x and older */
36 #ifdef __i386__
37 #define netbsd_set_mtrr(mr, num) i386_set_mtrr((mr), (num))
38 #endif
39 #ifdef __amd64__
40 #define netbsd_set_mtrr(mr, num) x86_64_set_mtrr((mr), (num))
41 #endif
42 #endif
43 #endif
44
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/pciio.h>
47 #include <dev/pci/pcireg.h>
48
49 #include <errno.h>
50 #include <fcntl.h>
51 #include <stdio.h>
52 #include <stdlib.h>
53 #include <string.h>
54 #include <unistd.h>
55
56
57 #include <pci.h>
58 #include <dev/wscons/wsconsio.h>
59
60 #include "pciaccess.h"
61 #include "pciaccess_private.h"
62
/*
 * One entry per /dev/pci* device node.  pci_device.domain is used as the
 * index into this table (see the long comment below for why each node is
 * treated as its own domain).
 */
typedef struct _pcibus {
    int fd;          /* /dev/pci* */
    int num;         /* bus number */
    int maxdevs;     /* maximum number of devices */
} PciBus;

static PciBus buses[32];     /* indexed by pci_device.domain */
static int nbuses = 0;       /* number of buses found */
71
72 /*
73 * NetBSD's userland has a /dev/pci* entry for each bus but userland has no way
74 * to tell if a bus is a subordinate of another one or if it's on a different
75 * host bridge. On some architectures ( macppc for example ) all root buses have
76 * bus number 0 but on sparc64 for example the two roots in an Ultra60 have
77 * different bus numbers - one is 0 and the other 128.
78 * With each /dev/pci* we can map everything on the same root and we can also
79 * see all devices on the same root, trying to do that causes problems though:
80 * - since we can't tell which /dev/pci* is a subordinate we would find some
81 * devices more than once
82 * - we would have to guess subordinate bus numbers which is a waste of time
83 * since we can ask each /dev/pci* for its bus number so we can scan only the
84 * buses we know exist, not all 256 which may exist in each domain.
85 * - some bus_space_mmap() methods may limit mappings to address ranges which
86 * belong to known devices on that bus only.
87 * Each host bridge may or may not have its own IO range, to avoid guesswork
88 * here each /dev/pci* will let userland map its appropriate IO range at
89 * PCI_MAGIC_IO_RANGE if defined in <machine/param.h>
90 * With all this we should be able to use any PCI graphics device on any PCI
91 * bus on any architecture as long as Xorg has a driver, without allowing
92 * arbitrary mappings via /dev/mem and without userland having to know or care
93 * about translating bus addresses to physical addresses or the other way
94 * around.
95 */
96
97 static int
pci_read(int domain,int bus,int dev,int func,uint32_t reg,uint32_t * val)98 pci_read(int domain, int bus, int dev, int func, uint32_t reg, uint32_t *val)
99 {
100 uint32_t rval;
101
102 if ((domain < 0) || (domain > nbuses))
103 return -1;
104
105 if (pcibus_conf_read(buses[domain].fd, (unsigned int)bus,
106 (unsigned int)dev, (unsigned int)func, reg, &rval) == -1)
107 return (-1);
108
109 *val = rval;
110
111 return 0;
112 }
113
114 static int
pci_write(int domain,int bus,int dev,int func,uint32_t reg,uint32_t val)115 pci_write(int domain, int bus, int dev, int func, uint32_t reg, uint32_t val)
116 {
117
118 if ((domain < 0) || (domain > nbuses))
119 return -1;
120
121 return pcibus_conf_write(buses[domain].fd, (unsigned int)bus,
122 (unsigned int)dev, (unsigned int)func, reg, val);
123 }
124
125 static int
pci_nfuncs(int domain,int bus,int dev)126 pci_nfuncs(int domain, int bus, int dev)
127 {
128 uint32_t hdr;
129
130 if ((domain < 0) || (domain > nbuses))
131 return -1;
132
133 if (pci_read(domain, bus, dev, 0, PCI_BHLC_REG, &hdr) != 0)
134 return -1;
135
136 return (PCI_HDRTYPE_MULTIFN(hdr) ? 8 : 1);
137 }
138
/*ARGSUSED*/
/*
 * Map a range of device memory into the process by mmap()ing the domain's
 * /dev/pci* node; the kernel translates map->base to the right physical
 * range.  On x86 a private MTRR is additionally installed when a
 * non-default cache mode was requested.
 *
 * Returns 0 on success or an errno value from mmap().
 */
static int
pci_device_netbsd_map_range(struct pci_device *dev,
    struct pci_device_mapping *map)
{
#ifdef HAVE_MTRR
    struct mtrr m;
    int n = 1;
#endif
    int prot, ret = 0;

    prot = PROT_READ;

    if (map->flags & PCI_DEV_MAP_FLAG_WRITABLE)
        prot |= PROT_WRITE;
    map->memory = mmap(NULL, (size_t)map->size, prot, MAP_SHARED,
        buses[dev->domain].fd, (off_t)map->base);
    if (map->memory == MAP_FAILED)
        return errno;

#ifdef HAVE_MTRR
    memset(&m, 0, sizeof(m));

    /* No need to set an MTRR if it's the default mode. */
    if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
        (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
        m.base = map->base;
        m.flags = MTRR_VALID | MTRR_PRIVATE;
        m.len = map->size;
        m.owner = getpid();
        if (map->flags & PCI_DEV_MAP_FLAG_CACHABLE)
            m.type = MTRR_TYPE_WB;
        /* If both flags are set, write-combining wins. */
        if (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)
            m.type = MTRR_TYPE_WC;

        /* MTRR failure is non-fatal: the mapping works, just uncached. */
        if ((netbsd_set_mtrr(&m, &n)) == -1) {
            fprintf(stderr, "mtrr set failed: %s\n",
                strerror(errno));
        }
    }
#endif

    return ret;
}
183
/*
 * Undo a mapping made by pci_device_netbsd_map_range(): drop the private
 * MTRR if one was installed, then let the generic code munmap the range.
 */
static int
pci_device_netbsd_unmap_range(struct pci_device *dev,
    struct pci_device_mapping *map)
{
#ifdef HAVE_MTRR
    struct mtrr m;
    int n = 1;

    memset(&m, 0, sizeof(m));

    /* Only mappings with a non-default cache mode got an MTRR. */
    if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
        (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
        m.base = map->base;
        m.flags = 0;     /* no MTRR_VALID — presumably clears the entry */
        m.len = map->size;
        m.type = MTRR_TYPE_UC;
        /* Best effort; failure to clear the MTRR is ignored. */
        (void)netbsd_set_mtrr(&m, &n);
    }
#endif

    return pci_device_generic_unmap_range(dev, map);
}
206
/*
 * Read "size" bytes of config space starting at "offset" into "data".
 * Unaligned reads are handled by fetching the enclosing 32-bit word and
 * shifting away the unwanted low bytes.
 *
 * Returns 0 on success or errno from a failed config read; *bytes_read
 * counts the bytes successfully copied so far.
 */
static int
pci_device_netbsd_read(struct pci_device *dev, void *data,
    pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_read)
{
    u_int reg, rval;

    *bytes_read = 0;
    while (size > 0) {
        /* Bytes available in the aligned word containing "offset". */
        size_t toread = MIN(size, 4 - (offset & 0x3));

        reg = (u_int)(offset & ~0x3);

        if ((pcibus_conf_read(buses[dev->domain].fd,
            (unsigned int)dev->bus, (unsigned int)dev->dev,
            (unsigned int)dev->func, reg, &rval)) == -1)
            return errno;

        /*
         * NOTE(review): htole32 puts the word in little-endian (PCI)
         * byte order before the byte-wise shift/copy — confirm this is
         * the intended layout on big-endian hosts.
         */
        rval = htole32(rval);
        rval >>= ((offset & 0x3) * 8);

        memcpy(data, &rval, toread);

        offset += toread;
        data = (char *)data + toread;
        size -= toread;
        *bytes_read += toread;
    }

    return 0;
}
237
238 static int
pci_device_netbsd_write(struct pci_device * dev,const void * data,pciaddr_t offset,pciaddr_t size,pciaddr_t * bytes_written)239 pci_device_netbsd_write(struct pci_device *dev, const void *data,
240 pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_written)
241 {
242 u_int reg, val;
243
244 if ((offset % 4) != 0 || (size % 4) != 0)
245 return EINVAL;
246
247 *bytes_written = 0;
248 while (size > 0) {
249 reg = (u_int)offset;
250 memcpy(&val, data, 4);
251
252 if ((pcibus_conf_write(buses[dev->domain].fd,
253 (unsigned int)dev->bus, (unsigned int)dev->dev,
254 (unsigned int)dev->func, reg, val)) == -1)
255 return errno;
256
257 offset += 4;
258 data = (const char *)data + 4;
259 size -= 4;
260 *bytes_written += 4;
261 }
262
263 return 0;
264 }
265
#if defined(WSDISPLAYIO_GET_BUSID)
/*
 * Ask wscons (via /dev/ttyE0) which PCI device is the console framebuffer
 * and report whether it is this one.  Returns 1 for the boot VGA device,
 * 0 otherwise or on any failure.
 */
static int
pci_device_netbsd_boot_vga(struct pci_device *dev)
{
    struct wsdisplayio_bus_id busid;
    int fd, ret;

    fd = open("/dev/ttyE0", O_RDONLY);
    if (fd == -1) {
        fprintf(stderr, "failed to open /dev/ttyE0: %s\n",
            strerror(errno));
        return 0;
    }

    ret = ioctl(fd, WSDISPLAYIO_GET_BUSID, &busid);
    close(fd);
    if (ret == -1) {
        fprintf(stderr, "ioctl WSDISPLAYIO_GET_BUSID failed: %s\n",
            strerror(errno));
        return 0;
    }

    if (busid.bus_type != WSDISPLAYIO_BUS_PCI)
        return 0;

    /* Every component of the PCI address must match. */
    return busid.ubus.pci.domain == dev->domain &&
        busid.ubus.pci.bus == dev->bus &&
        busid.ubus.pci.device == dev->dev &&
        busid.ubus.pci.function == dev->func;
}
#endif
304
305 static void
pci_system_netbsd_destroy(void)306 pci_system_netbsd_destroy(void)
307 {
308 int i;
309
310 for (i = 0; i < nbuses; i++) {
311 close(buses[i].fd);
312 }
313 free(pci_sys);
314 pci_sys = NULL;
315 }
316
/*
 * Fill in the private data for one device: make sure I/O, memory and
 * bus-master decoding are enabled, record the header type, then size each
 * BAR (and the expansion ROM) with the standard write-all-ones probe,
 * restoring the original register contents afterwards.
 *
 * Returns 0 on success or the error from a failed config access.
 */
static int
pci_device_netbsd_probe(struct pci_device *device)
{
    struct pci_device_private *priv =
        (struct pci_device_private *)(void *)device;
    struct pci_mem_region *region;
    uint64_t reg64, size64;
    uint32_t bar, reg, size;
    int bus, dev, func, err, domain;

    domain = device->domain;
    bus = device->bus;
    dev = device->dev;
    func = device->func;

    /* Enable the device if necessary */
    err = pci_read(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, &reg);
    if (err)
        return err;
    if ((reg & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE)) !=
        (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE)) {
        reg |= PCI_COMMAND_IO_ENABLE |
            PCI_COMMAND_MEM_ENABLE |
            PCI_COMMAND_MASTER_ENABLE;
        err = pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG,
            reg);
        if (err)
            return err;
    }

    err = pci_read(domain, bus, dev, func, PCI_BHLC_REG, &reg);
    if (err)
        return err;

    priv->header_type = PCI_HDRTYPE_TYPE(reg);
    /* Only type-0 (normal device) headers carry the BARs sized below. */
    if (priv->header_type != 0)
        return 0;

    region = device->regions;
    for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END;
        bar += sizeof(uint32_t), region++) {
        err = pci_read(domain, bus, dev, func, bar, &reg);
        if (err)
            return err;

        /* Probe the size of the region. */
        err = pci_write(domain, bus, dev, func, bar, (unsigned int)~0);
        if (err)
            return err;
        pci_read(domain, bus, dev, func, bar, &size);
        pci_write(domain, bus, dev, func, bar, reg);

        if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
            region->is_IO = 1;
            region->base_addr = PCI_MAPREG_IO_ADDR(reg);
            region->size = PCI_MAPREG_IO_SIZE(size);
        } else {
            if (PCI_MAPREG_MEM_PREFETCHABLE(reg))
                region->is_prefetchable = 1;
            switch(PCI_MAPREG_MEM_TYPE(reg)) {
            case PCI_MAPREG_MEM_TYPE_32BIT:
            case PCI_MAPREG_MEM_TYPE_32BIT_1M:
                region->base_addr = PCI_MAPREG_MEM_ADDR(reg);
                region->size = PCI_MAPREG_MEM_SIZE(size);
                break;
            case PCI_MAPREG_MEM_TYPE_64BIT:
                /*
                 * A 64-bit BAR occupies this register and the
                 * next one; probe both halves and combine them.
                 */
                region->is_64 = 1;

                reg64 = reg;
                size64 = size;

                bar += sizeof(uint32_t);

                err = pci_read(domain, bus, dev, func, bar, &reg);
                if (err)
                    return err;
                reg64 |= (uint64_t)reg << 32;

                err = pci_write(domain, bus, dev, func, bar,
                    (unsigned int)~0);
                if (err)
                    return err;
                pci_read(domain, bus, dev, func, bar, &size);
                pci_write(domain, bus, dev, func, bar,
                    (unsigned int)(reg64 >> 32));
                size64 |= (uint64_t)size << 32;

                region->base_addr =
                    (unsigned long)PCI_MAPREG_MEM64_ADDR(reg64);
                region->size =
                    (unsigned long)PCI_MAPREG_MEM64_SIZE(size64);
                /* Skip the region slot consumed by the high half. */
                region++;
                break;
            }
        }
    }

    /* Probe expansion ROM if present */
    err = pci_read(domain, bus, dev, func, PCI_MAPREG_ROM, &reg);
    if (err)
        return err;
    if (reg != 0) {
        err = pci_write(domain, bus, dev, func, PCI_MAPREG_ROM,
            (uint32_t)(~PCI_MAPREG_ROM_ENABLE));
        if (err)
            return err;
        pci_read(domain, bus, dev, func, PCI_MAPREG_ROM, &size);
        pci_write(domain, bus, dev, func, PCI_MAPREG_ROM, reg);
        if ((reg & PCI_MAPREG_MEM_ADDR_MASK) != 0) {
            priv->rom_base = reg & PCI_MAPREG_MEM_ADDR_MASK;
            /* Negating the masked probe value yields the ROM size. */
            device->rom_size = -(size & PCI_MAPREG_MEM_ADDR_MASK);
        }
    }

    return 0;
}
433
/**
 * Read a VGA rom using the 0xc0000 mapping.
 *
 * This function should be extended to handle access through PCI resources,
 * which should be more reliable when available.
 *
 * Only VGA display devices are handled.  When a ROM BAR was discovered by
 * probe(), memory decoding and the ROM enable bit are turned on for the
 * duration of the copy and restored to their prior state afterwards;
 * otherwise the legacy 0xc0000 window is used (x86 only).
 *
 * Returns 0 on success, ENOSYS for unsupported devices/platforms, or an
 * errno value from a failed config access or mmap.
 */
static int
pci_device_netbsd_read_rom(struct pci_device *dev, void *buffer)
{
    struct pci_device_private *priv = (struct pci_device_private *)(void *)dev;
    void *bios;
    pciaddr_t rom_base;
    size_t rom_size;
    uint32_t bios_val, command_val;
    int pci_rom;

    if (((priv->base.device_class >> 16) & 0xff) != PCI_CLASS_DISPLAY ||
        ((priv->base.device_class >> 8) & 0xff) != PCI_SUBCLASS_DISPLAY_VGA)
        return ENOSYS;

    if (priv->rom_base == 0) {
#if defined(__amd64__) || defined(__i386__)
        /*
         * We need a way to detect when this isn't the console and reject
         * this request outright.
         */
        rom_base = 0xc0000;
        rom_size = 0x10000;
        pci_rom = 0;
#else
        return ENOSYS;
#endif
    } else {
        rom_base = priv->rom_base;
        rom_size = dev->rom_size;
        pci_rom = 1;
        /* Temporarily enable memory decoding if it is off. */
        if ((pcibus_conf_read(buses[dev->domain].fd, (unsigned int)dev->bus,
            (unsigned int)dev->dev, (unsigned int)dev->func,
            PCI_COMMAND_STATUS_REG, &command_val)) == -1)
            return errno;
        if ((command_val & PCI_COMMAND_MEM_ENABLE) == 0) {
            if ((pcibus_conf_write(buses[dev->domain].fd,
                (unsigned int)dev->bus, (unsigned int)dev->dev,
                (unsigned int)dev->func, PCI_COMMAND_STATUS_REG,
                command_val | PCI_COMMAND_MEM_ENABLE)) == -1)
                return errno;
        }
        /* Temporarily set the ROM enable bit if it is off. */
        if ((pcibus_conf_read(buses[dev->domain].fd, (unsigned int)dev->bus,
            (unsigned int)dev->dev, (unsigned int)dev->func,
            PCI_MAPREG_ROM, &bios_val)) == -1)
            return errno;
        if ((bios_val & PCI_MAPREG_ROM_ENABLE) == 0) {
            if ((pcibus_conf_write(buses[dev->domain].fd,
                (unsigned int)dev->bus,
                (unsigned int)dev->dev, (unsigned int)dev->func,
                PCI_MAPREG_ROM, bios_val | PCI_MAPREG_ROM_ENABLE)) == -1)
                return errno;
        }
    }

    /* NOTE(review): unconditional debug output — candidate for removal. */
    fprintf(stderr, "Using rom_base = 0x%lx 0x%lx (pci_rom=%d)\n",
        (long)rom_base, (long)rom_size, pci_rom);

    bios = mmap(NULL, rom_size, PROT_READ, MAP_SHARED, buses[dev->domain].fd,
        (off_t)rom_base);
    if (bios == MAP_FAILED) {
        int serrno = errno;
        return serrno;
    }

    memcpy(buffer, bios, rom_size);

    munmap(bios, rom_size);

    /* Restore whatever enable bits were flipped above. */
    if (pci_rom) {
        if ((command_val & PCI_COMMAND_MEM_ENABLE) == 0) {
            if ((pcibus_conf_write(buses[dev->domain].fd,
                (unsigned int)dev->bus,
                (unsigned int)dev->dev, (unsigned int)dev->func,
                PCI_COMMAND_STATUS_REG, command_val)) == -1)
                return errno;
        }
        if ((bios_val & PCI_MAPREG_ROM_ENABLE) == 0) {
            if ((pcibus_conf_write(buses[dev->domain].fd,
                (unsigned int)dev->bus,
                (unsigned int)dev->dev, (unsigned int)dev->func,
                PCI_MAPREG_ROM, bios_val)) == -1)
                return errno;
        }
    }

    return 0;
}
527
528 #if defined(__i386__) || defined(__amd64__)
529 #include <machine/sysarch.h>
530
531 /*
532 * Functions to provide access to x86 programmed I/O instructions.
533 *
534 * The in[bwl]() and out[bwl]() functions are split into two varieties: one to
535 * use a small, constant, 8-bit port number, and another to use a large or
536 * variable port number. The former can be compiled as a smaller instruction.
537 */
538
539
540 #ifdef __OPTIMIZE__
541
542 #define __use_immediate_port(port) \
543 (__builtin_constant_p((port)) && (port) < 0x100)
544
545 #else
546
547 #define __use_immediate_port(port) 0
548
549 #endif
550
551
/* Read one byte from an I/O port; picks the immediate form when possible. */
#define inb(port) \
    (/* CONSTCOND */ __use_immediate_port(port) ? __inbc(port) : __inb(port))

/* inb with a compile-time constant port (< 0x100): immediate operand. */
static __inline u_int8_t
__inbc(unsigned port)
{
    u_int8_t data;
    __asm __volatile("inb %w1,%0" : "=a" (data) : "id" (port));
    return data;
}

/* inb with a run-time port number passed in %dx. */
static __inline u_int8_t
__inb(unsigned port)
{
    u_int8_t data;
    __asm __volatile("inb %w1,%0" : "=a" (data) : "d" (port));
    return data;
}
570
/*
 * Read cnt bytes from port into addr.  The historical "repne" prefix
 * behaves as plain "rep" on string I/O instructions.
 */
static __inline void
insb(unsigned port, void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\tinsb" :
        "=D" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt) :
        "memory");
}
581
/* Read one 16-bit word from an I/O port; immediate form when possible. */
#define inw(port) \
    (/* CONSTCOND */ __use_immediate_port(port) ? __inwc(port) : __inw(port))

/* inw with a compile-time constant port (< 0x100): immediate operand. */
static __inline u_int16_t
__inwc(unsigned port)
{
    u_int16_t data;
    __asm __volatile("inw %w1,%0" : "=a" (data) : "id" (port));
    return data;
}

/* inw with a run-time port number passed in %dx. */
static __inline u_int16_t
__inw(unsigned port)
{
    u_int16_t data;
    __asm __volatile("inw %w1,%0" : "=a" (data) : "d" (port));
    return data;
}
600
/* Read cnt 16-bit words from port into addr ("repne" acts as "rep"). */
static __inline void
insw(unsigned port, void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\tinsw" :
        "=D" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt) :
        "memory");
}
611
/* Read one 32-bit word from an I/O port; immediate form when possible. */
#define inl(port) \
    (/* CONSTCOND */ __use_immediate_port(port) ? __inlc(port) : __inl(port))

/* inl with a compile-time constant port (< 0x100): immediate operand. */
static __inline u_int32_t
__inlc(unsigned port)
{
    u_int32_t data;
    __asm __volatile("inl %w1,%0" : "=a" (data) : "id" (port));
    return data;
}

/* inl with a run-time port number passed in %dx. */
static __inline u_int32_t
__inl(unsigned port)
{
    u_int32_t data;
    __asm __volatile("inl %w1,%0" : "=a" (data) : "d" (port));
    return data;
}
630
/* Read cnt 32-bit words from port into addr ("repne" acts as "rep"). */
static __inline void
insl(unsigned port, void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\tinsl" :
        "=D" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt) :
        "memory");
}
641
/* Write one byte to an I/O port; picks the immediate form when possible. */
#define outb(port, data) \
    (/* CONSTCOND */__use_immediate_port(port) ? __outbc(port, data) : \
    __outb(port, data))

/* outb with a compile-time constant port (< 0x100): immediate operand. */
static __inline void
__outbc(unsigned port, u_int8_t data)
{
    __asm __volatile("outb %0,%w1" : : "a" (data), "id" (port));
}

/* outb with a run-time port number passed in %dx. */
static __inline void
__outb(unsigned port, u_int8_t data)
{
    __asm __volatile("outb %0,%w1" : : "a" (data), "d" (port));
}
657
/* Write cnt bytes from addr to port ("repne" acts as "rep"). */
static __inline void
outsb(unsigned port, const void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\toutsb" :
        "=S" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt));
}
667
/* Write one 16-bit word to an I/O port; immediate form when possible. */
#define outw(port, data) \
    (/* CONSTCOND */ __use_immediate_port(port) ? __outwc(port, data) : \
    __outw(port, data))

/* outw with a compile-time constant port (< 0x100): immediate operand. */
static __inline void
__outwc(unsigned port, u_int16_t data)
{
    __asm __volatile("outw %0,%w1" : : "a" (data), "id" (port));
}

/* outw with a run-time port number passed in %dx. */
static __inline void
__outw(unsigned port, u_int16_t data)
{
    __asm __volatile("outw %0,%w1" : : "a" (data), "d" (port));
}
683
/* Write cnt 16-bit words from addr to port ("repne" acts as "rep"). */
static __inline void
outsw(unsigned port, const void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\toutsw" :
        "=S" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt));
}
693
/* Write one 32-bit word to an I/O port; immediate form when possible. */
#define outl(port, data) \
    (/* CONSTCOND */ __use_immediate_port(port) ? __outlc(port, data) : \
    __outl(port, data))

/* outl with a compile-time constant port (< 0x100): immediate operand. */
static __inline void
__outlc(unsigned port, u_int32_t data)
{
    __asm __volatile("outl %0,%w1" : : "a" (data), "id" (port));
}

/* outl with a run-time port number passed in %dx. */
static __inline void
__outl(unsigned port, u_int32_t data)
{
    __asm __volatile("outl %0,%w1" : : "a" (data), "d" (port));
}
709
/* Write cnt 32-bit words from addr to port ("repne" acts as "rep"). */
static __inline void
outsl(unsigned port, const void *addr, int cnt)
{
    void *dummy1;
    int dummy2;
    __asm __volatile("cld\n\trepne\n\toutsl" :
        "=S" (dummy1), "=c" (dummy2) :
        "d" (port), "0" (addr), "1" (cnt));
}
719
720 #endif
721
722
/*
 * Prepare a handle for legacy I/O-port access.  On x86 this raises the
 * process I/O privilege level via sysarch() so the inline port helpers
 * above may execute in/out directly; no mapping is created.  Returns the
 * handle on success, NULL on failure or on non-x86 platforms.
 */
static struct pci_io_handle *
pci_device_netbsd_open_legacy_io(struct pci_io_handle *ret,
    struct pci_device *dev, pciaddr_t base, pciaddr_t size)
{
#if defined(__i386__)
    struct i386_iopl_args ia;

    ia.iopl = 1;
    if (sysarch(I386_IOPL, &ia))
        return NULL;

    ret->base = base;
    ret->size = size;
    ret->is_legacy = 1;
    return ret;
#elif defined(__amd64__)
    struct x86_64_iopl_args ia;

    ia.iopl = 1;
    if (sysarch(X86_64_IOPL, &ia))
        return NULL;

    ret->base = base;
    ret->size = size;
    ret->is_legacy = 1;
    return ret;
#else
    return NULL;
#endif
}
753
/* 32-bit legacy read: port I/O on x86, a mapped-memory load elsewhere. */
static uint32_t
pci_device_netbsd_read32(struct pci_io_handle *handle, uint32_t reg)
{
#if defined(__i386__) || defined(__amd64__)
    return inl(handle->base + reg);
#else
    return *(uint32_t *)((uintptr_t)handle->memory + reg);
#endif
}
763
/* 16-bit legacy read: port I/O on x86, a mapped-memory load elsewhere. */
static uint16_t
pci_device_netbsd_read16(struct pci_io_handle *handle, uint32_t reg)
{
#if defined(__i386__) || defined(__amd64__)
    return inw(handle->base + reg);
#else
    return *(uint16_t *)((uintptr_t)handle->memory + reg);
#endif
}
773
/* 8-bit legacy read: port I/O on x86, a mapped-memory load elsewhere. */
static uint8_t
pci_device_netbsd_read8(struct pci_io_handle *handle, uint32_t reg)
{
#if defined(__i386__) || defined(__amd64__)
    return inb(handle->base + reg);
#else
    return *(uint8_t *)((uintptr_t)handle->memory + reg);
#endif
}
783
784 static void
pci_device_netbsd_write32(struct pci_io_handle * handle,uint32_t reg,uint32_t data)785 pci_device_netbsd_write32(struct pci_io_handle *handle, uint32_t reg,
786 uint32_t data)
787 {
788 #if defined(__i386__) || defined(__amd64__)
789 outl(handle->base + reg, data);
790 #else
791 *(uint16_t *)((uintptr_t)handle->memory + reg) = data;
792 #endif
793 }
794
795 static void
pci_device_netbsd_write16(struct pci_io_handle * handle,uint32_t reg,uint16_t data)796 pci_device_netbsd_write16(struct pci_io_handle *handle, uint32_t reg,
797 uint16_t data)
798 {
799 #if defined(__i386__) || defined(__amd64__)
800 outw(handle->base + reg, data);
801 #else
802 *(uint8_t *)((uintptr_t)handle->memory + reg) = data;
803 #endif
804 }
805
806 static void
pci_device_netbsd_write8(struct pci_io_handle * handle,uint32_t reg,uint8_t data)807 pci_device_netbsd_write8(struct pci_io_handle *handle, uint32_t reg,
808 uint8_t data)
809 {
810 #if defined(__i386__) || defined(__amd64__)
811 outb(handle->base + reg, data);
812 #else
813 *(uint32_t *)((uintptr_t)handle->memory + reg) = data;
814 #endif
815 }
816
817 static int
pci_device_netbsd_map_legacy(struct pci_device * dev,pciaddr_t base,pciaddr_t size,unsigned map_flags,void ** addr)818 pci_device_netbsd_map_legacy(struct pci_device *dev, pciaddr_t base,
819 pciaddr_t size, unsigned map_flags, void **addr)
820 {
821 struct pci_device_mapping map;
822 int err;
823
824 map.base = base;
825 map.size = size;
826 map.flags = map_flags;
827 map.memory = NULL;
828 err = pci_device_netbsd_map_range(dev, &map);
829 *addr = map.memory;
830
831 return err;
832 }
833
834 static int
pci_device_netbsd_unmap_legacy(struct pci_device * dev,void * addr,pciaddr_t size)835 pci_device_netbsd_unmap_legacy(struct pci_device *dev, void *addr,
836 pciaddr_t size)
837 {
838 struct pci_device_mapping map;
839
840 map.memory = addr;
841 map.size = size;
842 map.flags = 0;
843 return pci_device_netbsd_unmap_range(dev, &map);
844 }
845
/*
 * Report whether a kernel driver (other than vga(4)) is attached to the
 * device.  Returns 1 when bound, 0 otherwise or when undeterminable.
 */
static int
pci_device_netbsd_has_kernel_driver(struct pci_device *dev)
{
#ifdef PCI_IOC_DRVNAME
    /*
     * NetBSD PCI_IOC_DRVNAME appears at the same time as pci_drvname(3)
     */
    char drvname[16];

    /*
     * buses[] is indexed by domain, not bus number (see its declaration
     * and every other caller); the previous code indexed it with
     * dev->bus, which breaks whenever a root bus is numbered differently
     * from its /dev/pci* instance (e.g. sparc64 bus 128).
     */
    if (dev->domain >= nbuses)
        return 0;

    /*
     * vga(4) should be considered "not bound".
     */
    if (pci_drvname(buses[dev->domain].fd, dev->dev, dev->func,
        drvname, sizeof drvname) == 0 &&
        strncmp(drvname, "vga", 3) != 0)
        return 1;
#endif
    return 0;
}
868
/* Backend dispatch table installed into pci_sys by pci_system_netbsd_create(). */
static const struct pci_system_methods netbsd_pci_methods = {
    .destroy = pci_system_netbsd_destroy,
    .destroy_device = NULL,     /* no per-device teardown required */
    .read_rom = pci_device_netbsd_read_rom,
    .probe = pci_device_netbsd_probe,
    .map_range = pci_device_netbsd_map_range,
    .unmap_range = pci_device_netbsd_unmap_range,
    .read = pci_device_netbsd_read,
    .write = pci_device_netbsd_write,
    .fill_capabilities = pci_fill_capabilities_generic,
#if defined(WSDISPLAYIO_GET_BUSID)
    .boot_vga = pci_device_netbsd_boot_vga,
#else
    .boot_vga = NULL,           /* wscons bus-id ioctl not available */
#endif
    .open_legacy_io = pci_device_netbsd_open_legacy_io,
    .read32 = pci_device_netbsd_read32,
    .read16 = pci_device_netbsd_read16,
    .read8 = pci_device_netbsd_read8,
    .write32 = pci_device_netbsd_write32,
    .write16 = pci_device_netbsd_write16,
    .write8 = pci_device_netbsd_write8,
    .map_legacy = pci_device_netbsd_map_legacy,
    .unmap_legacy = pci_device_netbsd_unmap_legacy,
    .has_kernel_driver = pci_device_netbsd_has_kernel_driver,
};
895
896 int
pci_system_netbsd_create(void)897 pci_system_netbsd_create(void)
898 {
899 struct pci_device_private *device;
900 int bus, dev, func, ndevs, nfuncs, domain, pcifd;
901 uint32_t reg;
902 char netbsd_devname[32];
903 struct pciio_businfo businfo;
904
905 pci_sys = calloc(1, sizeof(struct pci_system));
906
907 pci_sys->methods = &netbsd_pci_methods;
908
909 ndevs = 0;
910 nbuses = 0;
911 snprintf(netbsd_devname, 32, "/dev/pci%d", nbuses);
912 pcifd = open(netbsd_devname, O_RDWR | O_CLOEXEC);
913 while (pcifd > 0) {
914 ioctl(pcifd, PCI_IOC_BUSINFO, &businfo);
915 buses[nbuses].fd = pcifd;
916 buses[nbuses].num = bus = businfo.busno;
917 buses[nbuses].maxdevs = businfo.maxdevs;
918 domain = nbuses;
919 nbuses++;
920 for (dev = 0; dev < businfo.maxdevs; dev++) {
921 nfuncs = pci_nfuncs(domain, bus, dev);
922 for (func = 0; func < nfuncs; func++) {
923 if (pci_read(domain, bus, dev, func, PCI_ID_REG,
924 ®) != 0)
925 continue;
926 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
927 PCI_VENDOR(reg) == 0)
928 continue;
929
930 ndevs++;
931 }
932 }
933 snprintf(netbsd_devname, 32, "/dev/pci%d", nbuses);
934 pcifd = open(netbsd_devname, O_RDWR);
935 }
936
937 pci_sys->num_devices = ndevs;
938 pci_sys->devices = calloc(ndevs, sizeof(struct pci_device_private));
939 if (pci_sys->devices == NULL) {
940 int i;
941
942 for (i = 0; i < nbuses; i++)
943 close(buses[i].fd);
944 free(pci_sys);
945 return ENOMEM;
946 }
947
948 device = pci_sys->devices;
949 for (domain = 0; domain < nbuses; domain++) {
950 bus = buses[domain].num;
951 for (dev = 0; dev < buses[domain].maxdevs; dev++) {
952 nfuncs = pci_nfuncs(domain, bus, dev);
953 for (func = 0; func < nfuncs; func++) {
954 if (pci_read(domain, bus, dev, func,
955 PCI_ID_REG, ®) != 0)
956 continue;
957 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
958 PCI_VENDOR(reg) == 0)
959 continue;
960
961 device->base.domain = domain;
962 if (domain > 0xffff)
963 device->base.domain_16 = 0xffff;
964 else
965 device->base.domain_16 = domain & 0xffff;
966 device->base.bus = bus;
967 device->base.dev = dev;
968 device->base.func = func;
969 device->base.vendor_id = PCI_VENDOR(reg);
970 device->base.device_id = PCI_PRODUCT(reg);
971
972 if (pci_read(domain, bus, dev, func,
973 PCI_CLASS_REG, ®) != 0)
974 continue;
975
976 device->base.device_class =
977 PCI_INTERFACE(reg) | PCI_CLASS(reg) << 16 |
978 PCI_SUBCLASS(reg) << 8;
979 device->base.revision = PCI_REVISION(reg);
980
981 if (pci_read(domain, bus, dev, func,
982 PCI_SUBSYS_ID_REG, ®) != 0)
983 continue;
984
985 device->base.subvendor_id = PCI_VENDOR(reg);
986 device->base.subdevice_id = PCI_PRODUCT(reg);
987
988 device++;
989 }
990 }
991 }
992
993 return 0;
994 }
995