/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30
/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

/*
 * When the platform defines a non-posted device memory attribute, allocate
 * the ECAM window unmapped (RF_UNMAPPED) and remap it explicitly with
 * VM_MEMATTR_DEVICE_NP in attach; otherwise use the default mapping.
 */
#if defined(VM_MEMATTR_DEVICE_NP)
#define PCI_UNMAPPED
#define PCI_RF_FLAGS RF_UNMAPPED
#else
#define PCI_RF_FLAGS 0
#endif


/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);
74
75 int
pci_host_generic_core_attach(device_t dev)76 pci_host_generic_core_attach(device_t dev)
77 {
78 #ifdef PCI_UNMAPPED
79 struct resource_map_request req;
80 struct resource_map map;
81 #endif
82 struct generic_pcie_core_softc *sc;
83 struct rman *rm;
84 uint64_t phys_base;
85 uint64_t pci_base;
86 uint64_t size;
87 const char *range_descr;
88 char buf[64];
89 int domain, error;
90 int flags, rid, tuple, type;
91
92 sc = device_get_softc(dev);
93 sc->dev = dev;
94
95 /* Create the parent DMA tag to pass down the coherent flag */
96 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
97 1, 0, /* alignment, bounds */
98 BUS_SPACE_MAXADDR, /* lowaddr */
99 BUS_SPACE_MAXADDR, /* highaddr */
100 NULL, NULL, /* filter, filterarg */
101 BUS_SPACE_MAXSIZE, /* maxsize */
102 BUS_SPACE_UNRESTRICTED, /* nsegments */
103 BUS_SPACE_MAXSIZE, /* maxsegsize */
104 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
105 NULL, NULL, /* lockfunc, lockarg */
106 &sc->dmat);
107 if (error != 0)
108 return (error);
109
110 /*
111 * Attempt to set the domain. If it's missing, or we are unable to
112 * set it then memory allocations may be placed in the wrong domain.
113 */
114 if (bus_get_domain(dev, &domain) == 0)
115 (void)bus_dma_tag_set_domain(sc->dmat, domain);
116
117 if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
118 rid = 0;
119 sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
120 PCI_RF_FLAGS | RF_ACTIVE);
121 if (sc->res == NULL) {
122 device_printf(dev, "could not allocate memory.\n");
123 error = ENXIO;
124 goto err_resource;
125 }
126 #ifdef PCI_UNMAPPED
127 resource_init_map_request(&req);
128 req.memattr = VM_MEMATTR_DEVICE_NP;
129 error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req,
130 &map);
131 if (error != 0) {
132 device_printf(dev, "could not map memory.\n");
133 return (error);
134 }
135 rman_set_mapping(sc->res, &map);
136 #endif
137 }
138
139 sc->has_pmem = false;
140 sc->pmem_rman.rm_type = RMAN_ARRAY;
141 snprintf(buf, sizeof(buf), "%s prefetch window",
142 device_get_nameunit(dev));
143 sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);
144
145 sc->mem_rman.rm_type = RMAN_ARRAY;
146 snprintf(buf, sizeof(buf), "%s memory window",
147 device_get_nameunit(dev));
148 sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);
149
150 sc->io_rman.rm_type = RMAN_ARRAY;
151 snprintf(buf, sizeof(buf), "%s I/O port window",
152 device_get_nameunit(dev));
153 sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);
154
155 /* Initialize rman and allocate memory regions */
156 error = rman_init(&sc->pmem_rman);
157 if (error) {
158 device_printf(dev, "rman_init() failed. error = %d\n", error);
159 goto err_pmem_rman;
160 }
161
162 error = rman_init(&sc->mem_rman);
163 if (error) {
164 device_printf(dev, "rman_init() failed. error = %d\n", error);
165 goto err_mem_rman;
166 }
167
168 error = rman_init(&sc->io_rman);
169 if (error) {
170 device_printf(dev, "rman_init() failed. error = %d\n", error);
171 goto err_io_rman;
172 }
173
174 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
175 phys_base = sc->ranges[tuple].phys_base;
176 pci_base = sc->ranges[tuple].pci_base;
177 size = sc->ranges[tuple].size;
178 rid = tuple + 1;
179 if (size == 0)
180 continue; /* empty range element */
181 switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
182 case FLAG_TYPE_PMEM:
183 sc->has_pmem = true;
184 range_descr = "prefetch";
185 flags = RF_PREFETCHABLE;
186 type = SYS_RES_MEMORY;
187 rm = &sc->pmem_rman;
188 break;
189 case FLAG_TYPE_MEM:
190 range_descr = "memory";
191 flags = 0;
192 type = SYS_RES_MEMORY;
193 rm = &sc->mem_rman;
194 break;
195 case FLAG_TYPE_IO:
196 range_descr = "I/O port";
197 flags = 0;
198 type = SYS_RES_IOPORT;
199 rm = &sc->io_rman;
200 break;
201 default:
202 continue;
203 }
204 if (bootverbose)
205 device_printf(dev,
206 "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
207 pci_base, phys_base, size, range_descr);
208 error = bus_set_resource(dev, type, rid, phys_base, size);
209 if (error != 0) {
210 device_printf(dev,
211 "failed to set resource for range %d: %d\n", tuple,
212 error);
213 continue;
214 }
215 sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
216 RF_ACTIVE | RF_UNMAPPED | flags);
217 if (sc->ranges[tuple].res == NULL) {
218 device_printf(dev,
219 "failed to allocate resource for range %d\n", tuple);
220 continue;
221 }
222 error = rman_manage_region(rm, pci_base, pci_base + size - 1);
223 if (error) {
224 device_printf(dev, "rman_manage_region() failed."
225 "error = %d\n", error);
226 continue;
227 }
228 }
229
230 return (0);
231
232 err_io_rman:
233 rman_fini(&sc->mem_rman);
234 err_mem_rman:
235 rman_fini(&sc->pmem_rman);
236 err_pmem_rman:
237 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
238 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
239 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
240 if (sc->res != NULL)
241 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
242 err_resource:
243 bus_dma_tag_destroy(sc->dmat);
244 return (error);
245 }
246
247 int
pci_host_generic_core_detach(device_t dev)248 pci_host_generic_core_detach(device_t dev)
249 {
250 struct generic_pcie_core_softc *sc;
251 int error, tuple, type;
252
253 sc = device_get_softc(dev);
254
255 error = bus_generic_detach(dev);
256 if (error != 0)
257 return (error);
258
259 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
260 if (sc->ranges[tuple].size == 0)
261 continue; /* empty range element */
262 switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
263 case FLAG_TYPE_PMEM:
264 case FLAG_TYPE_MEM:
265 type = SYS_RES_MEMORY;
266 break;
267 case FLAG_TYPE_IO:
268 type = SYS_RES_IOPORT;
269 break;
270 default:
271 continue;
272 }
273 if (sc->ranges[tuple].res != NULL)
274 bus_release_resource(dev, type, tuple + 1,
275 sc->ranges[tuple].res);
276 bus_delete_resource(dev, type, tuple + 1);
277 }
278 rman_fini(&sc->io_rman);
279 rman_fini(&sc->mem_rman);
280 rman_fini(&sc->pmem_rman);
281 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
282 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
283 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
284 if (sc->res != NULL)
285 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
286 bus_dma_tag_destroy(sc->dmat);
287
288 return (0);
289 }
290
291 static uint32_t
generic_pcie_read_config(device_t dev,u_int bus,u_int slot,u_int func,u_int reg,int bytes)292 generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
293 u_int func, u_int reg, int bytes)
294 {
295 struct generic_pcie_core_softc *sc;
296 uint64_t offset;
297 uint32_t data;
298
299 sc = device_get_softc(dev);
300 if ((bus < sc->bus_start) || (bus > sc->bus_end))
301 return (~0U);
302 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
303 (reg > PCIE_REGMAX))
304 return (~0U);
305 if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
306 return (~0U);
307
308 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
309
310 switch (bytes) {
311 case 1:
312 data = bus_read_1(sc->res, offset);
313 break;
314 case 2:
315 data = le16toh(bus_read_2(sc->res, offset));
316 break;
317 case 4:
318 data = le32toh(bus_read_4(sc->res, offset));
319 break;
320 default:
321 return (~0U);
322 }
323
324 return (data);
325 }
326
327 static void
generic_pcie_write_config(device_t dev,u_int bus,u_int slot,u_int func,u_int reg,uint32_t val,int bytes)328 generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
329 u_int func, u_int reg, uint32_t val, int bytes)
330 {
331 struct generic_pcie_core_softc *sc;
332 uint64_t offset;
333
334 sc = device_get_softc(dev);
335 if ((bus < sc->bus_start) || (bus > sc->bus_end))
336 return;
337 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
338 (reg > PCIE_REGMAX))
339 return;
340
341 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
342
343 switch (bytes) {
344 case 1:
345 bus_write_1(sc->res, offset, val);
346 break;
347 case 2:
348 bus_write_2(sc->res, offset, htole16(val));
349 break;
350 case 4:
351 bus_write_4(sc->res, offset, htole32(val));
352 break;
353 default:
354 return;
355 }
356 }
357
358 static int
generic_pcie_maxslots(device_t dev)359 generic_pcie_maxslots(device_t dev)
360 {
361
362 return (31); /* max slots per bus acc. to standard */
363 }
364
365 static int
generic_pcie_read_ivar(device_t dev,device_t child,int index,uintptr_t * result)366 generic_pcie_read_ivar(device_t dev, device_t child, int index,
367 uintptr_t *result)
368 {
369 struct generic_pcie_core_softc *sc;
370
371 sc = device_get_softc(dev);
372
373 if (index == PCIB_IVAR_BUS) {
374 *result = sc->bus_start;
375 return (0);
376 }
377
378 if (index == PCIB_IVAR_DOMAIN) {
379 *result = sc->ecam;
380 return (0);
381 }
382
383 if (bootverbose)
384 device_printf(dev, "ERROR: Unknown index %d.\n", index);
385 return (ENOENT);
386 }
387
388 static int
generic_pcie_write_ivar(device_t dev,device_t child,int index,uintptr_t value)389 generic_pcie_write_ivar(device_t dev, device_t child, int index,
390 uintptr_t value)
391 {
392
393 return (ENOENT);
394 }
395
396 static struct rman *
generic_pcie_get_rman(device_t dev,int type,u_int flags)397 generic_pcie_get_rman(device_t dev, int type, u_int flags)
398 {
399 struct generic_pcie_core_softc *sc = device_get_softc(dev);
400
401 switch (type) {
402 case SYS_RES_IOPORT:
403 return (&sc->io_rman);
404 case SYS_RES_MEMORY:
405 if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
406 return (&sc->pmem_rman);
407 return (&sc->mem_rman);
408 default:
409 break;
410 }
411
412 return (NULL);
413 }
414
/*
 * Release a resource previously allocated to a child, dispatching on the
 * resource type: bus numbers go back to the PCI domain, memory and I/O
 * ports to our rmans, anything else up to the parent bus.
 */
int
pci_host_generic_core_release_resource(device_t dev, device_t child,
    struct resource *res)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (rman_get_type(res)) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		/* Bus number ranges are tracked per PCI domain (sc->ecam). */
		return (pci_domain_release_bus(sc->ecam, child, res));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		/* These came from our rmans (see generic_pcie_get_rman). */
		return (bus_generic_rman_release_resource(dev, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}
436
437 static struct pcie_range *
generic_pcie_containing_range(device_t dev,int type,rman_res_t start,rman_res_t end)438 generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
439 rman_res_t end)
440 {
441 struct generic_pcie_core_softc *sc = device_get_softc(dev);
442 uint64_t pci_base;
443 uint64_t size;
444 int i, space;
445
446 switch (type) {
447 case SYS_RES_IOPORT:
448 case SYS_RES_MEMORY:
449 break;
450 default:
451 return (NULL);
452 }
453
454 for (i = 0; i < MAX_RANGES_TUPLES; i++) {
455 pci_base = sc->ranges[i].pci_base;
456 size = sc->ranges[i].size;
457 if (size == 0)
458 continue; /* empty range element */
459
460 if (start < pci_base || end >= pci_base + size)
461 continue;
462
463 switch (FLAG_TYPE(sc->ranges[i].flags)) {
464 case FLAG_TYPE_MEM:
465 case FLAG_TYPE_PMEM:
466 space = SYS_RES_MEMORY;
467 break;
468 case FLAG_TYPE_IO:
469 space = SYS_RES_IOPORT;
470 break;
471 default:
472 continue;
473 }
474
475 if (type == space)
476 return (&sc->ranges[i]);
477 }
478 return (NULL);
479 }
480
481 static int
generic_pcie_translate_resource(device_t dev,int type,rman_res_t start,rman_res_t * new_start)482 generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
483 rman_res_t *new_start)
484 {
485 struct pcie_range *range;
486
487 /* Translate the address from a PCI address to a physical address */
488 switch (type) {
489 case SYS_RES_IOPORT:
490 case SYS_RES_MEMORY:
491 range = generic_pcie_containing_range(dev, type, start, start);
492 if (range == NULL)
493 return (ENOENT);
494 *new_start = start - range->pci_base + range->phys_base;
495 break;
496 default:
497 /* No translation for non-memory types */
498 *new_start = start;
499 break;
500 }
501
502 return (0);
503 }
504
/*
 * Allocate a resource for a child device.  Bus numbers come from the
 * PCI domain allocator, memory and I/O ports from our rmans, and any
 * other type is forwarded to the parent bus.  Failures are logged to
 * aid debugging of resource exhaustion.
 */
struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;
#endif
	struct resource *res;

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	sc = device_get_softc(dev);
#endif

	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}
542
/*
 * Activate a child's resource, dispatching on its type: bus numbers to
 * the PCI domain code, memory/I/O to the rman-backed generic helper,
 * anything else to the parent bus.
 */
static int
generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (rman_get_type(r)) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, r));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}
563
/*
 * Deactivate a child's resource; mirror image of
 * generic_pcie_activate_resource() with the same type dispatch.
 */
static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (rman_get_type(r)) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, r));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}
585
/*
 * Adjust the start/end of a child's already-allocated resource, using
 * the same per-type dispatch as allocation and release.
 */
static int
generic_pcie_adjust_resource(device_t dev, device_t child,
    struct resource *res, rman_res_t start, rman_res_t end)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (rman_get_type(res)) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, res, start,
		    end));
	}
}
610
/*
 * Map (a sub-range of) a child's memory or I/O resource.  The child's
 * PCI address range is located within one of the bridge's translated
 * windows and the mapping is performed against that window's backing
 * resource at the matching offset.  Bus-number resources cannot be
 * mapped; other types are forwarded to the parent.
 */
static int
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error, type;

	type = rman_get_type(r);
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		/* Bus numbers have no backing memory to map. */
		return (EINVAL);
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, r, argsp, map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	/* Validate and normalize the requested sub-range. */
	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start, &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	/* Map relative to the window's resource, rebased into PCI space. */
	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_map_resource(dev, range->res, &args, map));
}
651
/*
 * Undo a mapping established by generic_pcie_map_resource(): find the
 * window that backed the mapping and unmap through its resource.
 */
static int
generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct pcie_range *range;
	int type;

	type = rman_get_type(r);
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		/* Bus numbers are never mapped, so cannot be unmapped. */
		return (EINVAL);
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_unmap_resource(dev, child, r, map));
	}

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, range->res, map));
}
678
679 static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev,device_t child)680 generic_pcie_get_dma_tag(device_t dev, device_t child)
681 {
682 struct generic_pcie_core_softc *sc;
683
684 sc = device_get_softc(dev);
685 return (sc->dmat);
686 }
687
/*
 * Method table for the core "pcib" class.  Front ends (FDT/ACPI)
 * subclass this driver and inherit these methods.
 */
static device_method_t generic_pcie_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(device_detach,		pci_host_generic_core_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_rman,			generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource,	generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,		generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,		generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));
718