/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

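/*
 * On platforms that provide a non-posted device memory attribute
 * (e.g. arm64's VM_MEMATTR_DEVICE_NP), allocate the ECAM window
 * unmapped here and remap it with that attribute in attach, since
 * PCIe configuration accesses are non-posted transactions.
 */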
#if defined(VM_MEMATTR_DEVICE_NP)
#define	PCI_UNMAPPED
#define	PCI_RF_FLAGS	RF_UNMAPPED
#else
#define	PCI_RF_FLAGS	0
#endif

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

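/*
 * Core attach.  The softc's quirks, bus range, and decoded ranges[] are
 * expected to have been filled in by a bus-specific front end (e.g. the
 * FDT or ACPI attachment) before this runs.  This creates the parent
 * DMA tag, maps the ECAM configuration window, and registers each
 * decoded range with the appropriate rman.
 */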
int
pci_host_generic_core_attach(device_t dev)
{
#ifdef PCI_UNMAPPED
	struct resource_map_request req;
	struct resource_map map;
#endif
	struct generic_pcie_core_softc *sc;
	struct rman *rm;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	const char *range_descr;
	char buf[64];
	int domain, error;
	int flags, rid, tuple, type;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	/*
	 * Attempt to set the domain.  If it's missing, or we are unable to
	 * set it, then memory allocations may be placed in the wrong domain.
	 */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(sc->dmat, domain);

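	/*
	 * Map the ECAM configuration window, unless a front end with the
	 * custom config space quirk supplies its own accessors.
	 */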
	if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
		rid = 0;
		sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    PCI_RF_FLAGS | RF_ACTIVE);
		if (sc->res == NULL) {
			device_printf(dev, "could not allocate memory.\n");
			error = ENXIO;
			goto err_resource;
		}
#ifdef PCI_UNMAPPED
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE_NP;
		error = bus_map_resource(dev, sc->res, &req, &map);
		if (error != 0) {
			device_printf(dev, "could not map memory.\n");
			/*
			 * The rman descriptors have not been allocated
			 * yet, so the frees on this path are no-ops.
			 */
			goto err_pmem_rman;
		}
		rman_set_mapping(sc->res, &map);
#endif
	}

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s prefetch window",
	    device_get_nameunit(dev));
	sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window",
	    device_get_nameunit(dev));
	sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->io_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s I/O port window",
	    device_get_nameunit(dev));
	sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

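	/*
	 * Each decoded range is registered three ways: as a resource of
	 * this device (so the parent bus knows the CPU-side window), as
	 * an active allocation of that resource, and as an rman region
	 * covering the PCI-side addresses that child BARs may occupy.
	 */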
	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		rid = tuple + 1;
		if (size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			range_descr = "prefetch";
			flags = RF_PREFETCHABLE;
			type = SYS_RES_MEMORY;
			rm = &sc->pmem_rman;
			break;
		case FLAG_TYPE_MEM:
			range_descr = "memory";
			flags = 0;
			type = SYS_RES_MEMORY;
			rm = &sc->mem_rman;
			break;
		case FLAG_TYPE_IO:
			range_descr = "I/O port";
			flags = 0;
			type = SYS_RES_IOPORT;
			rm = &sc->io_rman;
			break;
		default:
			continue;
		}
		if (bootverbose)
			device_printf(dev,
			    "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
			    pci_base, phys_base, size, range_descr);
		error = bus_set_resource(dev, type, rid, phys_base, size);
		if (error != 0) {
			device_printf(dev,
			    "failed to set resource for range %d: %d\n", tuple,
			    error);
			continue;
		}
		sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
		    RF_ACTIVE | RF_UNMAPPED | flags);
		if (sc->ranges[tuple].res == NULL) {
			device_printf(dev,
			    "failed to allocate resource for range %d\n", tuple);
			continue;
		}
		error = rman_manage_region(rm, pci_base, pci_base + size - 1);
		if (error) {
			device_printf(dev, "rman_manage_region() failed. "
			    "error = %d\n", error);
			continue;
		}
	}

	return (0);

err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

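/*
 * Core detach: after detaching any children, tear down in the reverse
 * order of pci_host_generic_core_attach().
 */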
int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error, tuple, type;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		if (sc->ranges[tuple].size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
			type = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			type = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, type, tuple + 1,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, type, tuple + 1);
	}
	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

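/*
 * ECAM config space accessors.  Requests outside the decoded bus range
 * or register space read as all-ones and are discarded on write,
 * mimicking a nonexistent device.
 */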
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

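	/*
	 * Standard ECAM addressing: the offset within the window is
	 * bus << 20 | slot << 15 | func << 12 | reg, which is what
	 * PCIE_ADDR_OFFSET() encodes.
	 */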
	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		data = bus_read_1(sc->res, offset);
		break;
	case 2:
		data = le16toh(bus_read_2(sc->res, offset));
		break;
	case 4:
		data = le32toh(bus_read_4(sc->res, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		bus_write_1(sc->res, offset, val);
		break;
	case 2:
		bus_write_2(sc->res, offset, htole16(val));
		break;
	case 4:
		bus_write_4(sc->res, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus according to the PCI standard */
}

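/*
 * The ECAM segment number (sc->ecam) doubles as the PCI domain that is
 * reported to children and passed to the pci_domain_*() helpers below.
 */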
static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		*result = sc->bus_start;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

static struct rman *
generic_pcie_get_rman(device_t dev, int type, u_int flags)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

int
pci_host_generic_core_release_resource(device_t dev, device_t child,
    struct resource *res)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ecam, child, res));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}

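/*
 * Find the decoded range whose PCI-side window contains [start, end]
 * and matches the requested resource type.  Used both for address
 * translation and for carving child mappings out of the bridge's own
 * mapped ranges.
 */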
static struct pcie_range *
generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
    rman_res_t end)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);
	uint64_t pci_base;
	uint64_t size;
	int i, space;

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (NULL);
	}

	for (i = 0; i < MAX_RANGES_TUPLES; i++) {
		pci_base = sc->ranges[i].pci_base;
		size = sc->ranges[i].size;
		if (size == 0)
			continue; /* empty range element */

		if (start < pci_base || end >= pci_base + size)
			continue;

		switch (FLAG_TYPE(sc->ranges[i].flags)) {
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_PMEM:
			space = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			space = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}

		if (type == space)
			return (&sc->ranges[i]);
	}
	return (NULL);
}

static int
generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
    rman_res_t *new_start)
{
	struct pcie_range *range;

	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type, start, start);
		if (range == NULL)
			return (ENOENT);
		*new_start = start - range->pci_base + range->phys_base;
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		break;
	}

	return (0);
}

struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;

	sc = device_get_softc(dev);

	switch (type) {
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, res, start,
		    end));
	}
}

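/*
 * Child memory and I/O port resources are not mapped directly; their
 * mappings are carved out of the bridge's own range mappings, so the
 * offset below is relative to the containing range's PCI base.
 */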
static int
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error, type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, r, argsp, map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start, &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_map_resource(dev, range->res, &args, map));
}

static int
generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct pcie_range *range;
	int type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_unmap_resource(dev, child, r, map));
	}

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, range->res, map));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,	pci_host_generic_core_attach),
	DEVMETHOD(device_detach,	pci_host_generic_core_detach),

	DEVMETHOD(bus_get_rman,		generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,	generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,	generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,	pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,	pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,	generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,	generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,	generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,	generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,	generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));