1 // Initialize PCI devices (on emulators)
2 //
3 // Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
4 // Copyright (C) 2006 Fabrice Bellard
5 //
6 // This file may be distributed under the terms of the GNU LGPLv3 license.
7
8 #include "byteorder.h" // le64_to_cpu
9 #include "config.h" // CONFIG_*
10 #include "dev-q35.h" // Q35_HOST_BRIDGE_PCIEXBAR_ADDR
11 #include "dev-piix.h" // PIIX_*
12 #include "e820map.h" // e820_add
13 #include "hw/ata.h" // PORT_ATA1_CMD_BASE
14 #include "hw/pci.h" // pci_config_readl
15 #include "hw/pcidevice.h" // pci_probe_devices
16 #include "hw/pci_ids.h" // PCI_VENDOR_ID_INTEL
17 #include "hw/pci_regs.h" // PCI_COMMAND
18 #include "fw/dev-pci.h" // REDHAT_CAP_RESOURCE_RESERVE
19 #include "list.h" // struct hlist_node
20 #include "malloc.h" // free
21 #include "output.h" // dprintf
22 #include "paravirt.h" // RamSize
23 #include "romfile.h" // romfile_loadint
24 #include "string.h" // memset
25 #include "util.h" // pci_setup
26 #include "x86.h" // outb
27
// Minimum sizes handed out to device BARs and bridge windows.
#define PCI_DEVICE_MEM_MIN (1<<12) // 4k == page size
#define PCI_BRIDGE_MEM_MIN (1<<21) // 2M == hugepage size
#define PCI_BRIDGE_IO_MIN 0x1000 // mandated by pci bridge spec

// Index of the expansion ROM BAR and BAR counts per device/bridge.
#define PCI_ROM_SLOT 6
#define PCI_NUM_REGIONS 7
#define PCI_BRIDGE_NUM_REGIONS 2

// The three address-space classes a PCI BAR may request.
enum pci_region_type {
    PCI_REGION_TYPE_IO,
    PCI_REGION_TYPE_MEM,
    PCI_REGION_TYPE_PREFMEM,
    PCI_REGION_TYPE_COUNT,
};

// Debug-output names indexed by enum pci_region_type.
static const char *region_type_name[] = {
    [ PCI_REGION_TYPE_IO ] = "io",
    [ PCI_REGION_TYPE_MEM ] = "mem",
    [ PCI_REGION_TYPE_PREFMEM ] = "prefmem",
};
48
// Bounds of the 32bit and 64bit PCI memory windows; the platform setup
// handlers below may move pcimem_start above RAM/mmconfig.
u64 pcimem_start = BUILD_PCIMEM_START;
u64 pcimem_end = BUILD_PCIMEM_END;
u64 pcimem64_start = BUILD_PCIMEM64_START;
u64 pcimem64_end = BUILD_PCIMEM64_END;
// End of the low I/O-port range usable for PCI io allocations.
u64 pci_io_low_end = 0xa000;
54
// One resource request queued on a pci_region list: either a device
// BAR (bar >= 0) or a bridge window (bar == -1, see
// pci_bios_check_devices).
struct pci_region_entry {
    struct pci_device *dev;
    int bar;                    // BAR index, or -1 for a bridge window
    u64 size;                   // requested size in bytes
    u64 align;                  // required alignment in bytes
    int is64;                   // nonzero if a 64bit address is acceptable
    enum pci_region_type type;
    struct hlist_node node;     // linkage on pci_region.list
};

struct pci_region {
    /* pci region assignments */
    u64 base;                   // base address assigned to this window
    struct hlist_head list;     // entries, kept sorted by align/size
};

// Per-bus state: one region per type, plus the bridge device leading
// to this bus (NULL for root busses).
struct pci_bus {
    struct pci_region r[PCI_REGION_TYPE_COUNT];
    struct pci_device *bus_dev;
};
75
pci_bar(struct pci_device * pci,int region_num)76 static u32 pci_bar(struct pci_device *pci, int region_num)
77 {
78 if (region_num != PCI_ROM_SLOT) {
79 return PCI_BASE_ADDRESS_0 + region_num * 4;
80 }
81
82 #define PCI_HEADER_TYPE_MULTI_FUNCTION 0x80
83 u8 type = pci->header_type & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
84 return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
85 }
86
87 static void
pci_set_io_region_addr(struct pci_device * pci,int bar,u64 addr,int is64)88 pci_set_io_region_addr(struct pci_device *pci, int bar, u64 addr, int is64)
89 {
90 u32 ofs = pci_bar(pci, bar);
91 pci_config_writel(pci->bdf, ofs, addr);
92 if (is64)
93 pci_config_writel(pci->bdf, ofs + 4, addr >> 32);
94 }
95
96
97 /****************************************************************
98 * Misc. device init
99 ****************************************************************/
100
/* host irqs corresponding to PCI irqs A-D */
const u8 pci_irqs[4] = {
    10, 10, 11, 11
};

// Fallback IRQ router used until a platform handler installs a real one.
static int dummy_pci_slot_get_irq(struct pci_device *pci, int pin)
{
    dprintf(1, "pci_slot_get_irq called with unknown routing\n");

    return 0xff; /* PCI defined "unknown" or "no connection" for x86 */
}

// Active IRQ router; replaced by i440fx/Q35 platform setup below.
static int (*pci_slot_get_irq)(struct pci_device *pci, int pin) =
    dummy_pci_slot_get_irq;
115
116 // Return the global irq number corresponding to a host bus device irq pin.
piix_pci_slot_get_irq(struct pci_device * pci,int pin)117 static int piix_pci_slot_get_irq(struct pci_device *pci, int pin)
118 {
119 int slot_addend = 0;
120
121 while (pci->parent != NULL) {
122 slot_addend += pci_bdf_to_dev(pci->bdf);
123 pci = pci->parent;
124 }
125 slot_addend += pci_bdf_to_dev(pci->bdf) - 1;
126 return pci_irqs[(pin - 1 + slot_addend) & 3];
127 }
128
mch_pci_slot_get_irq(struct pci_device * pci,int pin)129 static int mch_pci_slot_get_irq(struct pci_device *pci, int pin)
130 {
131 int pin_addend = 0;
132 while (pci->parent != NULL) {
133 pin_addend += pci_bdf_to_dev(pci->bdf);
134 pci = pci->parent;
135 }
136 u8 slot = pci_bdf_to_dev(pci->bdf);
137 if (slot <= 24)
138 /* Slots 0-24 rotate slot:pin mapping similar to piix above, but
139 with a different starting index - see q35-acpi-dsdt.dsl */
140 return pci_irqs[(pin - 1 + pin_addend + slot) & 3];
141 /* Slots 25-31 all use LNKA mapping (or LNKE, but A:D = E:H) */
142 return pci_irqs[(pin - 1 + pin_addend) & 3];
143 }
144
/* PIIX3/PIIX4 PCI to ISA bridge */
// Route PIRQA-D to the irqs in pci_irqs[] and mark those irqs as
// level-triggered in the 8259 ELCR registers.
static void piix_isa_bridge_setup(struct pci_device *pci, void *arg)
{
    int i, irq;
    u8 elcr[2];

    elcr[0] = 0x00;
    elcr[1] = 0x00;
    for (i = 0; i < 4; i++) {
        irq = pci_irqs[i];
        /* set to trigger level */
        elcr[irq >> 3] |= (1 << (irq & 7));
        /* activate irq remapping in PIIX */
        pci_config_writeb(pci->bdf, 0x60 + i, irq);
    }
    // Program the accumulated edge/level mask into the ELCR ports.
    outb(elcr[0], PIIX_PORT_ELCR1);
    outb(elcr[1], PIIX_PORT_ELCR2);
    dprintf(1, "PIIX3/PIIX4 init: elcr=%02x %02x\n", elcr[0], elcr[1]);
}
164
// Program the ICH9 LPC bridge registers that are lost over suspend:
// ACPI PM io base, ACPI enable, and the root complex base (RCBA).
// Called at boot and again from pci_resume().
static void mch_isa_lpc_setup(u16 bdf)
{
    /* pm io base */
    pci_config_writel(bdf, ICH9_LPC_PMBASE,
                      acpi_pm_base | ICH9_LPC_PMBASE_RTE);

    /* acpi enable, SCI: IRQ9 000b = irq9*/
    pci_config_writeb(bdf, ICH9_LPC_ACPI_CTRL, ICH9_LPC_ACPI_CTRL_ACPI_EN);

    /* set root complex register block BAR */
    pci_config_writel(bdf, ICH9_LPC_RCBA,
                      ICH9_LPC_RCBA_ADDR | ICH9_LPC_RCBA_EN);
}
178
// bdf of the ICH9 LPC bridge if found; lets pci_resume() reprogram it.
static int ICH9LpcBDF = -1;

/* ICH9 LPC PCI to ISA bridge */
/* PCI_VENDOR_ID_INTEL && PCI_DEVICE_ID_INTEL_ICH9_LPC */
static void mch_isa_bridge_setup(struct pci_device *dev, void *arg)
{
    u16 bdf = dev->bdf;
    int i, irq;
    u8 elcr[2];

    elcr[0] = 0x00;
    elcr[1] = 0x00;

    for (i = 0; i < 4; i++) {
        irq = pci_irqs[i];
        /* set to trigger level */
        elcr[irq >> 3] |= (1 << (irq & 7));

        /* activate irq remapping in LPC */

        /* PIRQ[A-D] routing */
        pci_config_writeb(bdf, ICH9_LPC_PIRQA_ROUT + i, irq);
        /* PIRQ[E-H] routing */
        pci_config_writeb(bdf, ICH9_LPC_PIRQE_ROUT + i, irq);
    }
    outb(elcr[0], ICH9_LPC_PORT_ELCR1);
    outb(elcr[1], ICH9_LPC_PORT_ELCR2);
    dprintf(1, "Q35 LPC init: elcr=%02x %02x\n", elcr[0], elcr[1]);

    // Remember the bridge so pci_resume() can redo the volatile setup.
    ICH9LpcBDF = bdf;

    mch_isa_lpc_setup(bdf);

    // Keep the RCBA mmio range out of usable RAM.
    e820_add(ICH9_LPC_RCBA_ADDR, 16*1024, E820_RESERVED);

    // Publish the ACPI pm1a control port and pm-timer locations.
    acpi_pm1a_cnt = acpi_pm_base + 0x04;
    pmtimer_setup(acpi_pm_base + 0x08);
}
217
storage_ide_setup(struct pci_device * pci,void * arg)218 static void storage_ide_setup(struct pci_device *pci, void *arg)
219 {
220 /* IDE: we map it as in ISA mode */
221 pci_set_io_region_addr(pci, 0, PORT_ATA1_CMD_BASE, 0);
222 pci_set_io_region_addr(pci, 1, PORT_ATA1_CTRL_BASE, 0);
223 pci_set_io_region_addr(pci, 2, PORT_ATA2_CMD_BASE, 0);
224 pci_set_io_region_addr(pci, 3, PORT_ATA2_CTRL_BASE, 0);
225 }
226
227 /* PIIX3/PIIX4 IDE */
piix_ide_setup(struct pci_device * pci,void * arg)228 static void piix_ide_setup(struct pci_device *pci, void *arg)
229 {
230 u16 bdf = pci->bdf;
231 pci_config_writew(bdf, 0x40, 0x8000); // enable IDE0
232 pci_config_writew(bdf, 0x42, 0x8000); // enable IDE1
233 }
234
pic_ibm_setup(struct pci_device * pci,void * arg)235 static void pic_ibm_setup(struct pci_device *pci, void *arg)
236 {
237 /* PIC, IBM, MPIC & MPIC2 */
238 pci_set_io_region_addr(pci, 0, 0x80800000 + 0x00040000, 0);
239 }
240
// Map the Apple macio bridge at its traditional fixed address.
static void apple_macio_setup(struct pci_device *pci, void *arg)
{
    /* macio bridge */
    pci_set_io_region_addr(pci, 0, 0x80800000, 0);
}
246
// Program the PIIX4 power-management function: SCI irq and the PM and
// SMBus io-space bases.  Called at boot and again from pci_resume().
static void piix4_pm_config_setup(u16 bdf)
{
    // acpi sci is hardwired to 9
    pci_config_writeb(bdf, PCI_INTERRUPT_LINE, 9);

    // Bit 0 in the base registers selects io space.
    pci_config_writel(bdf, PIIX_PMBASE, acpi_pm_base | 1);
    pci_config_writeb(bdf, PIIX_PMREGMISC, 0x01); /* enable PM io space */
    pci_config_writel(bdf, PIIX_SMBHSTBASE, (acpi_pm_base + 0x100) | 1);
    pci_config_writeb(bdf, PIIX_SMBHSTCFG, 0x09); /* enable SMBus io space */
}
257
// bdf of the PIIX4 PM device if found; lets pci_resume() reprogram it.
static int PiixPmBDF = -1;

/* PIIX4 Power Management device (for ACPI) */
static void piix4_pm_setup(struct pci_device *pci, void *arg)
{
    PiixPmBDF = pci->bdf;
    piix4_pm_config_setup(pci->bdf);

    // Publish the ACPI pm1a control port and pm-timer locations.
    acpi_pm1a_cnt = acpi_pm_base + 0x04;
    pmtimer_setup(acpi_pm_base + 0x08);
}
269
// Map the ICH9 SMBus controller into io space and enable its host
// controller.  Called at boot and again from pci_resume().
static void ich9_smbus_enable(u16 bdf)
{
    /* map smbus into io space */
    pci_config_writel(bdf, ICH9_SMB_SMB_BASE,
                      (acpi_pm_base + 0x100) | PCI_BASE_ADDRESS_SPACE_IO);

    /* enable SMBus */
    pci_config_writeb(bdf, ICH9_SMB_HOSTC, ICH9_SMB_HOSTC_HST_EN);
}

// bdf of the ICH9 SMBus device if found; lets pci_resume() reprogram it.
static int ICH9SmbusBDF = -1;

/* ICH9 SMBUS */
/* PCI_VENDOR_ID_INTEL && PCI_DEVICE_ID_INTEL_ICH9_SMBUS */
static void ich9_smbus_setup(struct pci_device *dev, void *arg)
{
    ICH9SmbusBDF = dev->bdf;

    ich9_smbus_enable(dev->bdf);
}
290
// Intel integrated graphics setup: copy the IGD OpRegion supplied via
// fw_cfg into RAM and point the device at it, and optionally reserve a
// "base of data stolen memory" (BDSM) region of the requested size.
static void intel_igd_setup(struct pci_device *dev, void *arg)
{
    struct romfile_s *opregion = romfile_find("etc/igd-opregion");
    u64 bdsm_size = le64_to_cpu(romfile_loadint("etc/igd-bdsm-size", 0));

    /* Apply OpRegion to any Intel VGA device, more than one is undefined */
    if (opregion && opregion->size) {
        void *addr = memalign_high(PAGE_SIZE, opregion->size);
        if (!addr) {
            warn_noalloc();
            return;
        }

        if (opregion->copy(opregion, addr, opregion->size) < 0) {
            // Copy failed - release the buffer and skip OpRegion setup.
            free(addr);
            return;
        }

        // Config offset 0xFC is the ASLS register pointing at the OpRegion.
        pci_config_writel(dev->bdf, 0xFC, cpu_to_le32((u32)addr));

        dprintf(1, "Intel IGD OpRegion enabled at 0x%08x, size %dKB, dev %pP\n"
                , (u32)addr, opregion->size >> 10, dev);
    }

    /* Apply BDSM only to Intel VGA at 00:02.0 */
    if (bdsm_size && (dev->bdf == pci_to_bdf(0, 2, 0))) {
        // 1MB-aligned allocation, kept out of the OS view via e820.
        void *addr = memalign_tmphigh(1024 * 1024, bdsm_size);
        if (!addr) {
            warn_noalloc();
            return;
        }

        e820_add((u32)addr, bdsm_size, E820_RESERVED);

        // Config offset 0x5C is the BDSM register.
        pci_config_writel(dev->bdf, 0x5C, cpu_to_le32((u32)addr));

        dprintf(1, "Intel IGD BDSM enabled at 0x%08x, size %lldMB, dev %pP\n"
                , (u32)addr, bdsm_size >> 20, dev);
    }
}
331
// Per-device quirk/init handlers, matched by vendor/device/class from
// pci_bios_init_device() via pci_init_device().
static const struct pci_device_id pci_device_tbl[] = {
    /* PIIX3/PIIX4 PCI to ISA bridge */
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0,
               piix_isa_bridge_setup),
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
               piix_isa_bridge_setup),
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_LPC,
               mch_isa_bridge_setup),

    /* STORAGE IDE */
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1,
                     PCI_CLASS_STORAGE_IDE, piix_ide_setup),
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB,
                     PCI_CLASS_STORAGE_IDE, piix_ide_setup),
    PCI_DEVICE_CLASS(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE,
                     storage_ide_setup),

    /* PIC, IBM, MPIC & MPIC2 */
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_IBM, 0x0046, PCI_CLASS_SYSTEM_PIC,
                     pic_ibm_setup),
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_IBM, 0xFFFF, PCI_CLASS_SYSTEM_PIC,
                     pic_ibm_setup),

    /* PIIX4 Power Management device (for ACPI) */
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
               piix4_pm_setup),
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_SMBUS,
               ich9_smbus_setup),

    /* 0xff00 */
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_APPLE, 0x0017, 0xff00, apple_macio_setup),
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_APPLE, 0x0022, 0xff00, apple_macio_setup),

    /* Intel IGD OpRegion setup */
    PCI_DEVICE_CLASS(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA,
                     intel_igd_setup),

    PCI_DEVICE_END,
};
371
// bdf of the Q35 MCH if found; lets pci_resume() re-enable mmconfig.
static int MCHMmcfgBDF = -1;
static void mch_mmconfig_setup(u16 bdf);

// Reprogram chipset devices whose configuration is lost across S3
// suspend/resume (only applicable when running on QEMU).
void pci_resume(void)
{
    if (!CONFIG_QEMU) {
        return;
    }

    if (PiixPmBDF >= 0) {
        piix4_pm_config_setup(PiixPmBDF);
    }

    if (ICH9LpcBDF >= 0) {
        mch_isa_lpc_setup(ICH9LpcBDF);
    }

    if (ICH9SmbusBDF >= 0) {
        ich9_smbus_enable(ICH9SmbusBDF);
    }

    if(MCHMmcfgBDF >= 0) {
        mch_mmconfig_setup(MCHMmcfgBDF);
    }
}
397
// Generic per-device init: route the interrupt line, run any matching
// handler from pci_device_tbl, then enable io/mem decoding and SERR.
static void pci_bios_init_device(struct pci_device *pci)
{
    dprintf(1, "PCI: init bdf=%pP id=%04x:%04x\n"
            , pci, pci->vendor, pci->device);

    /* map the interrupt */
    u16 bdf = pci->bdf;
    int pin = pci_config_readb(bdf, PCI_INTERRUPT_PIN);
    if (pin != 0)
        pci_config_writeb(bdf, PCI_INTERRUPT_LINE, pci_slot_get_irq(pci, pin));

    // Run any vendor/device/class specific handler.
    pci_init_device(pci_device_tbl, pci, NULL);

    /* enable memory mappings */
    pci_config_maskw(bdf, PCI_COMMAND, 0,
                     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_SERR);
    /* enable SERR# for forwarding */
    if (pci->header_type & PCI_HEADER_TYPE_BRIDGE)
        pci_config_maskw(bdf, PCI_BRIDGE_CONTROL, 0,
                         PCI_BRIDGE_CTL_SERR);
}
419
pci_bios_init_devices(void)420 static void pci_bios_init_devices(void)
421 {
422 struct pci_device *pci;
423 foreachpci(pci) {
424 pci_bios_init_device(pci);
425 }
426 }
427
// Ensure a primary VGA device is usable.  If no probed device already
// qualifies, pick the first VGA-class device, enable its decoding, and
// set the VGA-enable bit on every bridge up to the root.
static void pci_enable_default_vga(void)
{
    struct pci_device *pci;

    foreachpci(pci) {
        if (is_pci_vga(pci)) {
            dprintf(1, "PCI: Using %pP for primary VGA\n", pci);
            return;
        }
    }

    pci = pci_find_class(PCI_CLASS_DISPLAY_VGA);
    if (!pci) {
        dprintf(1, "PCI: No VGA devices found\n");
        return;
    }

    dprintf(1, "PCI: Enabling %pP for primary VGA\n", pci);

    pci_config_maskw(pci->bdf, PCI_COMMAND, 0,
                     PCI_COMMAND_IO | PCI_COMMAND_MEMORY);

    // Walk up the bridge chain so legacy VGA cycles are forwarded.
    while (pci->parent) {
        pci = pci->parent;

        dprintf(1, "PCI: Setting VGA enable on bridge %pP\n", pci);

        pci_config_maskw(pci->bdf, PCI_BRIDGE_CONTROL, 0, PCI_BRIDGE_CTL_VGA);
        pci_config_maskw(pci->bdf, PCI_COMMAND, 0,
                         PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
    }
}
460
461 /****************************************************************
462 * Platform device initialization
463 ****************************************************************/
464
// i440fx host bridge: place the 32bit PCI memory window above RAM (at
// 2G or 3G; the build-time default is kept if RAM extends past 3G) and
// install the piix interrupt router.
static void i440fx_mem_addr_setup(struct pci_device *dev, void *arg)
{
    if (RamSize <= 0x80000000)
        pcimem_start = 0x80000000;
    else if (RamSize <= 0xc0000000)
        pcimem_start = 0xc0000000;

    pci_slot_get_irq = piix_pci_slot_get_irq;
}
474
// Enable the Q35 PCIe mmconfig window (PCIEXBAR).  The register is
// first disabled, then the upper half written, then the lower half
// with the enable bit - so the window is never active with a
// half-programmed address.
static void mch_mmconfig_setup(u16 bdf)
{
    u64 addr = Q35_HOST_BRIDGE_PCIEXBAR_ADDR;
    u32 upper = addr >> 32;
    u32 lower = (addr & 0xffffffff) | Q35_HOST_BRIDGE_PCIEXBAREN;
    pci_config_writel(bdf, Q35_HOST_BRIDGE_PCIEXBAR, 0);
    pci_config_writel(bdf, Q35_HOST_BRIDGE_PCIEXBAR + 4, upper);
    pci_config_writel(bdf, Q35_HOST_BRIDGE_PCIEXBAR, lower);
    // Switch SeaBIOS config accesses over to mmconfig.
    pci_enable_mmconfig(Q35_HOST_BRIDGE_PCIEXBAR_ADDR, "q35");
}
485
// Q35 host bridge: enable mmconfig and reserve it in e820, start the
// PCI memory window above it, install the Q35 interrupt router, and
// pick the end of the low io-port range.
static void mch_mem_addr_setup(struct pci_device *dev, void *arg)
{
    u64 addr = Q35_HOST_BRIDGE_PCIEXBAR_ADDR;
    u32 size = Q35_HOST_BRIDGE_PCIEXBAR_SIZE;

    /* setup mmconfig */
    MCHMmcfgBDF = dev->bdf;
    mch_mmconfig_setup(dev->bdf);
    e820_add(addr, size, E820_RESERVED);

    /* setup pci i/o window (above mmconfig) */
    pcimem_start = addr + size;

    pci_slot_get_irq = mch_pci_slot_get_irq;

    /* setup io address space */
    if (acpi_pm_base < 0x1000)
        pci_io_low_end = 0x10000;
    else
        pci_io_low_end = acpi_pm_base;
}
507
// Host-bridge handlers: detect i440fx vs Q35 and run the matching
// platform address-space setup.
static const struct pci_device_id pci_platform_tbl[] = {
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441,
               i440fx_mem_addr_setup),
    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_Q35_MCH,
               mch_mem_addr_setup),
    PCI_DEVICE_END
};
515
pci_bios_init_platform(void)516 static void pci_bios_init_platform(void)
517 {
518 struct pci_device *pci;
519 foreachpci(pci) {
520 pci_init_device(pci_platform_tbl, pci, NULL);
521 }
522 }
523
// Locate QEMU's vendor-specific "resource reserve" capability on a
// Red Hat pcie-root-port or pci bridge.  Returns the capability's
// config-space offset, or 0 if absent or malformed.
static u8 pci_find_resource_reserve_capability(u16 bdf)
{
    u16 device_id;

    if (pci_config_readw(bdf, PCI_VENDOR_ID) != PCI_VENDOR_ID_REDHAT) {
        dprintf(3, "PCI: This is non-QEMU bridge.\n");
        return 0;
    }

    device_id = pci_config_readw(bdf, PCI_DEVICE_ID);

    if (device_id != PCI_DEVICE_ID_REDHAT_ROOT_PORT &&
        device_id != PCI_DEVICE_ID_REDHAT_BRIDGE) {
        dprintf(1, "PCI: QEMU resource reserve cap device ID doesn't match.\n");
        return 0;
    }
    u8 cap = 0;

    // Walk the vendor-specific capabilities until one carries the
    // resource-reserve type byte.
    do {
        cap = pci_find_capability(bdf, PCI_CAP_ID_VNDR, cap);
    } while (cap &&
             pci_config_readb(bdf, cap + PCI_CAP_REDHAT_TYPE_OFFSET) !=
                 REDHAT_CAP_RESOURCE_RESERVE);
    if (cap) {
        // Sanity-check the advertised capability length.
        u8 cap_len = pci_config_readb(bdf, cap + PCI_CAP_FLAGS);
        if (cap_len < RES_RESERVE_CAP_SIZE) {
            dprintf(1, "PCI: QEMU resource reserve cap length %d is invalid\n",
                    cap_len);
            return 0;
        }
    } else {
        dprintf(1, "PCI: QEMU resource reserve cap not found\n");
    }
    return cap;
}
559
560 /****************************************************************
561 * Bus initialization
562 ****************************************************************/
563
// Recursively assign bus numbers behind every bridge found on "bus".
// *pci_bus tracks the highest bus number handed out so far.
static void
pci_bios_init_bus_rec(int bus, u8 *pci_bus)
{
    int bdf;
    u16 class;

    dprintf(1, "PCI: %s bus = 0x%x\n", __func__, bus);

    /* prevent accidental access to unintended devices */
    foreachbdf(bdf, bus) {
        class = pci_config_readw(bdf, PCI_CLASS_DEVICE);
        if (class == PCI_CLASS_BRIDGE_PCI) {
            pci_config_writeb(bdf, PCI_SECONDARY_BUS, 255);
            pci_config_writeb(bdf, PCI_SUBORDINATE_BUS, 0);
        }
    }

    foreachbdf(bdf, bus) {
        class = pci_config_readw(bdf, PCI_CLASS_DEVICE);
        if (class != PCI_CLASS_BRIDGE_PCI) {
            continue;
        }
        dprintf(1, "PCI: %s bdf = 0x%x\n", __func__, bdf);

        // The bridge's primary bus must match the bus it sits on.
        u8 pribus = pci_config_readb(bdf, PCI_PRIMARY_BUS);
        if (pribus != bus) {
            dprintf(1, "PCI: primary bus = 0x%x -> 0x%x\n", pribus, bus);
            pci_config_writeb(bdf, PCI_PRIMARY_BUS, bus);
        } else {
            dprintf(1, "PCI: primary bus = 0x%x\n", pribus);
        }

        // Hand out the next free bus number as the secondary bus.
        u8 secbus = pci_config_readb(bdf, PCI_SECONDARY_BUS);
        (*pci_bus)++;
        if (*pci_bus != secbus) {
            dprintf(1, "PCI: secondary bus = 0x%x -> 0x%x\n",
                    secbus, *pci_bus);
            secbus = *pci_bus;
            pci_config_writeb(bdf, PCI_SECONDARY_BUS, secbus);
        } else {
            dprintf(1, "PCI: secondary bus = 0x%x\n", secbus);
        }

        /* set to max for access to all subordinate buses.
           later set it to accurate value */
        u8 subbus = pci_config_readb(bdf, PCI_SUBORDINATE_BUS);
        pci_config_writeb(bdf, PCI_SUBORDINATE_BUS, 255);

        // Number everything behind this bridge before clamping.
        pci_bios_init_bus_rec(secbus, pci_bus);

        if (subbus != *pci_bus) {
            u8 res_bus = *pci_bus;
            // QEMU may ask for extra bus numbers for hotplug via the
            // resource-reserve capability.
            u8 cap = pci_find_resource_reserve_capability(bdf);

            if (cap) {
                u32 tmp_res_bus = pci_config_readl(bdf,
                                                   cap + RES_RESERVE_BUS_RES);
                if (tmp_res_bus != (u32)-1) {
                    res_bus = tmp_res_bus & 0xFF;
                    // Reject reservations that would wrap past bus 255.
                    if ((u8)(res_bus + secbus) < secbus ||
                        (u8)(res_bus + secbus) < res_bus) {
                        dprintf(1, "PCI: bus_reserve value %d is invalid\n",
                                res_bus);
                        res_bus = 0;
                    }
                    if (secbus + res_bus > *pci_bus) {
                        dprintf(1, "PCI: QEMU resource reserve cap: bus = %u\n",
                                res_bus);
                        res_bus = secbus + res_bus;
                    }
                }
            }
            dprintf(1, "PCI: subordinate bus = 0x%x -> 0x%x\n",
                    subbus, res_bus);
            subbus = res_bus;
            *pci_bus = res_bus;
        } else {
            dprintf(1, "PCI: subordinate bus = 0x%x\n", subbus);
        }
        pci_config_writeb(bdf, PCI_SUBORDINATE_BUS, subbus);
    }
}
646
647 static void
pci_bios_init_bus(void)648 pci_bios_init_bus(void)
649 {
650 u8 extraroots = romfile_loadint("etc/extra-pci-roots", 0);
651 u8 pci_bus = 0;
652
653 pci_bios_init_bus_rec(0 /* host bus */, &pci_bus);
654
655 if (extraroots) {
656 while (pci_bus < 0xff) {
657 pci_bus++;
658 pci_bios_init_bus_rec(pci_bus, &pci_bus);
659 }
660 }
661 }
662
663
664 /****************************************************************
665 * Bus sizing
666 ****************************************************************/
667
// Size a BAR by writing all-ones and reading back the writable bits
// (standard PCI BAR sizing).  Reports the region type, size, and
// whether the BAR is 64bit capable; the original register contents are
// restored before returning.
static void
pci_bios_get_bar(struct pci_device *pci, int bar,
                 int *ptype, u64 *psize, int *pis64)
{
    u32 ofs = pci_bar(pci, bar);
    u16 bdf = pci->bdf;
    u32 old = pci_config_readl(bdf, ofs);
    int is64 = 0, type = PCI_REGION_TYPE_MEM;
    u64 mask;

    if (bar == PCI_ROM_SLOT) {
        mask = PCI_ROM_ADDRESS_MASK;
        pci_config_writel(bdf, ofs, mask);
    } else {
        if (old & PCI_BASE_ADDRESS_SPACE_IO) {
            mask = PCI_BASE_ADDRESS_IO_MASK;
            type = PCI_REGION_TYPE_IO;
        } else {
            mask = PCI_BASE_ADDRESS_MEM_MASK;
            if (old & PCI_BASE_ADDRESS_MEM_PREFETCH)
                type = PCI_REGION_TYPE_PREFMEM;
            is64 = ((old & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
                    == PCI_BASE_ADDRESS_MEM_TYPE_64);
        }
        pci_config_writel(bdf, ofs, ~0);
    }
    u64 val = pci_config_readl(bdf, ofs);
    pci_config_writel(bdf, ofs, old);
    if (is64) {
        // Size the upper dword of the 64bit BAR the same way.
        u32 hold = pci_config_readl(bdf, ofs + 4);
        pci_config_writel(bdf, ofs + 4, ~0);
        u32 high = pci_config_readl(bdf, ofs + 4);
        pci_config_writel(bdf, ofs + 4, hold);
        val |= ((u64)high << 32);
        mask |= ((u64)0xffffffff << 32);
        *psize = (~(val & mask)) + 1;
    } else {
        *psize = ((~(val & mask)) + 1) & 0xffffffff;
    }
    *ptype = type;
    *pis64 = is64;
}
710
pci_bios_bridge_region_is64(struct pci_region * r,struct pci_device * pci,int type)711 static int pci_bios_bridge_region_is64(struct pci_region *r,
712 struct pci_device *pci, int type)
713 {
714 if (type != PCI_REGION_TYPE_PREFMEM)
715 return 0;
716 u32 pmem = pci_config_readl(pci->bdf, PCI_PREF_MEMORY_BASE);
717 if (!pmem) {
718 pci_config_writel(pci->bdf, PCI_PREF_MEMORY_BASE, 0xfff0fff0);
719 pmem = pci_config_readl(pci->bdf, PCI_PREF_MEMORY_BASE);
720 pci_config_writel(pci->bdf, PCI_PREF_MEMORY_BASE, 0x0);
721 }
722 if ((pmem & PCI_PREF_RANGE_TYPE_MASK) != PCI_PREF_RANGE_TYPE_64)
723 return 0;
724 struct pci_region_entry *entry;
725 hlist_for_each_entry(entry, &r->list, node) {
726 if (!entry->is64)
727 return 0;
728 }
729 return 1;
730 }
731
// Largest alignment required by any entry in the region.  The list is
// kept sorted with the most-aligned entry first, so only the head is
// examined (the loop returns on its first iteration).
static u64 pci_region_align(struct pci_region *r)
{
    struct pci_region_entry *entry;
    hlist_for_each_entry(entry, &r->list, node) {
        // The first entry in the sorted list has the largest alignment
        return entry->align;
    }
    // Empty region - any alignment will do.
    return 1;
}
741
pci_region_sum(struct pci_region * r)742 static u64 pci_region_sum(struct pci_region *r)
743 {
744 u64 sum = 0;
745 struct pci_region_entry *entry;
746 hlist_for_each_entry(entry, &r->list, node) {
747 sum += entry->size;
748 }
749 return sum;
750 }
751
pci_region_migrate_64bit_entries(struct pci_region * from,struct pci_region * to)752 static void pci_region_migrate_64bit_entries(struct pci_region *from,
753 struct pci_region *to)
754 {
755 struct hlist_node *n, **last = &to->list.first;
756 struct pci_region_entry *entry;
757 hlist_for_each_entry_safe(entry, n, &from->list, node) {
758 if (!entry->is64)
759 continue;
760 if (entry->dev->class == PCI_CLASS_SERIAL_USB)
761 continue;
762 // Move from source list to destination list.
763 hlist_del(&entry->node);
764 hlist_add(&entry->node, last);
765 last = &entry->node.next;
766 }
767 }
768
// Allocate a pci_region_entry describing a resource request and insert
// it into the bus's list for "type", sorted so the entry with the
// largest alignment (then size) comes first.  Returns NULL on OOM.
static struct pci_region_entry *
pci_region_create_entry(struct pci_bus *bus, struct pci_device *dev,
                        int bar, u64 size, u64 align, int type, int is64)
{
    struct pci_region_entry *entry = malloc_tmp(sizeof(*entry));
    if (!entry) {
        warn_noalloc();
        return NULL;
    }
    memset(entry, 0, sizeof(*entry));
    entry->dev = dev;
    entry->bar = bar;       // -1 marks a bridge window, not a device BAR
    entry->size = size;
    entry->align = align;
    entry->is64 = is64;
    entry->type = type;
    // Insert into list in sorted order.
    struct hlist_node **pprev;
    struct pci_region_entry *pos;
    hlist_for_each_entry_pprev(pos, pprev, &bus->r[type].list, node) {
        if (pos->align < align || (pos->align == align && pos->size < size))
            break;
    }
    hlist_add(&entry->node, pprev);
    return entry;
}
795
// Report whether devices may be hot-plugged behind this bus: either
// its bridge is a PCIe downstream/root port with a slot implemented,
// or it carries an SHPC (standard hotplug controller) capability.
static int pci_bus_hotplug_support(struct pci_bus *bus, u8 pcie_cap)
{
    u8 shpc_cap;

    if (pcie_cap) {
        u16 pcie_flags = pci_config_readw(bus->bus_dev->bdf,
                                          pcie_cap + PCI_EXP_FLAGS);
        u8 port_type = ((pcie_flags & PCI_EXP_FLAGS_TYPE) >>
                        (__builtin_ffs(PCI_EXP_FLAGS_TYPE) - 1));
        u8 downstream_port = (port_type == PCI_EXP_TYPE_DOWNSTREAM) ||
                             (port_type == PCI_EXP_TYPE_ROOT_PORT);
        /*
         * PCI Express SPEC, 7.8.2:
         *   Slot Implemented - When Set, this bit indicates that the Link
         *   HwInit associated with this Port is connected to a slot (as
         *   compared to being connected to a system-integrated device or
         *   being disabled).
         *   This bit is valid for Downstream Ports. This bit is undefined
         *   for Upstream Ports.
         */
        u16 slot_implemented = pcie_flags & PCI_EXP_FLAGS_SLOT;

        return downstream_port && slot_implemented;
    }

    shpc_cap = pci_find_capability(bus->bus_dev->bdf, PCI_CAP_ID_SHPC, 0);
    return !!shpc_cap;
}
824
825 /* Test whether bridge support forwarding of transactions
826 * of a specific type.
827 * Note: disables bridge's window registers as a side effect.
828 */
pci_bridge_has_region(struct pci_device * pci,enum pci_region_type region_type)829 static int pci_bridge_has_region(struct pci_device *pci,
830 enum pci_region_type region_type)
831 {
832 u8 base;
833
834 switch (region_type) {
835 case PCI_REGION_TYPE_IO:
836 base = PCI_IO_BASE;
837 break;
838 case PCI_REGION_TYPE_PREFMEM:
839 base = PCI_PREF_MEMORY_BASE;
840 break;
841 default:
842 /* Regular memory support is mandatory */
843 return 1;
844 }
845
846 pci_config_writeb(pci->bdf, base, 0xFF);
847
848 return pci_config_readb(pci->bdf, base) != 0;
849 }
850
// Size every BAR on every device and record the resources each bus
// needs in "busses", then propagate bridge-window requirements up to
// parent busses (honoring QEMU's resource-reserve hints and hotplug
// minimums).  Returns -1 on allocation failure.
static int pci_bios_check_devices(struct pci_bus *busses)
{
    dprintf(1, "PCI: check devices\n");

    // Calculate resources needed for regular (non-bus) devices.
    struct pci_device *pci;
    foreachpci(pci) {
        if (pci->class == PCI_CLASS_BRIDGE_PCI)
            busses[pci->secondary_bus].bus_dev = pci;

        struct pci_bus *bus = &busses[pci_bdf_to_bus(pci->bdf)];
        if (!bus->bus_dev)
            /*
             * Resources for all root busses go in busses[0]
             */
            bus = &busses[0];
        int i;
        for (i = 0; i < PCI_NUM_REGIONS; i++) {
            // Bridges only have two regular BARs plus the ROM BAR.
            if ((pci->class == PCI_CLASS_BRIDGE_PCI) &&
                (i >= PCI_BRIDGE_NUM_REGIONS && i < PCI_ROM_SLOT))
                continue;
            int type, is64;
            u64 size;
            pci_bios_get_bar(pci, i, &type, &size, &is64);
            if (size == 0)
                continue;

            // Never hand out less than a page of mmio space.
            if (type != PCI_REGION_TYPE_IO && size < PCI_DEVICE_MEM_MIN)
                size = PCI_DEVICE_MEM_MIN;
            struct pci_region_entry *entry = pci_region_create_entry(
                bus, pci, i, size, size, type, is64);
            if (!entry)
                return -1;

            // A 64bit BAR consumes the following BAR slot as well.
            if (is64)
                i++;
        }
    }

    // Propagate required bus resources to parent busses.
    int secondary_bus;
    for (secondary_bus=MaxPCIBus; secondary_bus>0; secondary_bus--) {
        struct pci_bus *s = &busses[secondary_bus];
        if (!s->bus_dev)
            continue;
        struct pci_bus *parent = &busses[pci_bdf_to_bus(s->bus_dev->bdf)];
        if (!parent->bus_dev)
            /*
             * Resources for all root busses go in busses[0]
             */
            parent = &busses[0];
        int type;
        u16 bdf = s->bus_dev->bdf;
        u8 pcie_cap = pci_find_capability(bdf, PCI_CAP_ID_EXP, 0);
        u8 qemu_cap = pci_find_resource_reserve_capability(bdf);

        int hotplug_support = pci_bus_hotplug_support(s, pcie_cap);
        for (type = 0; type < PCI_REGION_TYPE_COUNT; type++) {
            u64 align = (type == PCI_REGION_TYPE_IO) ?
                PCI_BRIDGE_IO_MIN : PCI_BRIDGE_MEM_MIN;
            if (!pci_bridge_has_region(s->bus_dev, type))
                continue;
            // "size" starts as any explicit reservation QEMU requested
            // via its vendor capability (a value of all-ones means
            // "no reservation for this type").
            u64 size = 0;
            if (qemu_cap) {
                u32 tmp_size;
                u64 tmp_size_64;
                switch(type) {
                case PCI_REGION_TYPE_IO:
                    tmp_size_64 = (pci_config_readl(bdf, qemu_cap + RES_RESERVE_IO) |
                            (u64)pci_config_readl(bdf, qemu_cap + RES_RESERVE_IO + 4) << 32);
                    if (tmp_size_64 != (u64)-1) {
                        size = tmp_size_64;
                    }
                    break;
                case PCI_REGION_TYPE_MEM:
                    tmp_size = pci_config_readl(bdf, qemu_cap + RES_RESERVE_MEM);
                    if (tmp_size != (u32)-1) {
                        size = tmp_size;
                    }
                    break;
                case PCI_REGION_TYPE_PREFMEM:
                    // PREF32 and PREF64 are mutually exclusive.
                    tmp_size = pci_config_readl(bdf, qemu_cap + RES_RESERVE_PREF_MEM_32);
                    tmp_size_64 = (pci_config_readl(bdf, qemu_cap + RES_RESERVE_PREF_MEM_64) |
                            (u64)pci_config_readl(bdf, qemu_cap + RES_RESERVE_PREF_MEM_64 + 4) << 32);
                    if (tmp_size != (u32)-1 && tmp_size_64 == (u64)-1) {
                        size = tmp_size;
                    } else if (tmp_size == (u32)-1 && tmp_size_64 != (u64)-1) {
                        size = tmp_size_64;
                    } else if (tmp_size != (u32)-1 && tmp_size_64 != (u64)-1) {
                        dprintf(1, "PCI: resource reserve cap PREF32 and PREF64"
                                " conflict\n");
                    }
                    break;
                default:
                    break;
                }
            }
            // The window must be at least as aligned as its largest member.
            if (pci_region_align(&s->r[type]) > align)
                 align = pci_region_align(&s->r[type]);
            u64 sum = pci_region_sum(&s->r[type]);
            int resource_optional = pcie_cap && (type == PCI_REGION_TYPE_IO);
            if (!sum && hotplug_support && !resource_optional)
                sum = align; /* reserve min size for hot-plug */
            if (size > sum) {
                dprintf(1, "PCI: QEMU resource reserve cap: "
                        "size %08llx type %s\n",
                        size, region_type_name[type]);
                if (type != PCI_REGION_TYPE_IO) {
                    size = ALIGN(size, align);
                }
            } else {
                size = ALIGN(sum, align);
            }
            int is64 = pci_bios_bridge_region_is64(&s->r[type],
                                                   s->bus_dev, type);
            // entry->bar is -1 if the entry represents a bridge region
            struct pci_region_entry *entry = pci_region_create_entry(
                parent, s->bus_dev, -1, size, align, type, is64);
            if (!entry)
                return -1;
            dprintf(1, "PCI: secondary bus %d size %08llx type %s\n",
                    entry->dev->secondary_bus, size,
                    region_type_name[entry->type]);
        }
    }
    return 0;
}
978
979
980 /****************************************************************
981 * BAR assignment
982 ****************************************************************/
983
// Setup region bases (given the regions' size and alignment)
// Pick a base for the root bus io window; returns -1 when the total io
// requirement cannot fit.
static int pci_bios_init_root_regions_io(struct pci_bus *bus)
{
    /*
     * QEMU I/O address space usage:
     *   0000 - 0fff    legacy isa, pci config, pci root bus, ...
     *   1000 - 9fff    free
     *   a000 - afff    hotplug (cpu, pci via acpi, i440fx/piix only)
     *   b000 - bfff    power management (PORT_ACPI_PM_BASE)
     *                  [ qemu 1.4+ implements pci config registers
     *                    properly so guests can place the registers
     *                    where they want, on older versions its fixed ]
     *   c000 - ffff    free, traditionally used for pci io
     */
    struct pci_region *r_io = &bus->r[PCI_REGION_TYPE_IO];
    u64 sum = pci_region_sum(r_io);
    if (sum < 0x4000) {
        /* traditional region is big enough, use it */
        r_io->base = 0xc000;
    } else if (sum < pci_io_low_end - 0x1000) {
        /* use the larger region at 0x1000 */
        r_io->base = 0x1000;
    } else {
        /* not enough io address space -> error out */
        return -1;
    }
    dprintf(1, "PCI: IO: %4llx - %4llx\n", r_io->base, r_io->base + sum - 1);
    return 0;
}
1013
pci_bios_init_root_regions_mem(struct pci_bus * bus)1014 static int pci_bios_init_root_regions_mem(struct pci_bus *bus)
1015 {
1016 struct pci_region *r_end = &bus->r[PCI_REGION_TYPE_PREFMEM];
1017 struct pci_region *r_start = &bus->r[PCI_REGION_TYPE_MEM];
1018
1019 if (pci_region_align(r_start) < pci_region_align(r_end)) {
1020 // Swap regions to improve alignment.
1021 r_end = r_start;
1022 r_start = &bus->r[PCI_REGION_TYPE_PREFMEM];
1023 }
1024 u64 sum = pci_region_sum(r_end);
1025 u64 align = pci_region_align(r_end);
1026 r_end->base = ALIGN_DOWN((pcimem_end - sum), align);
1027 sum = pci_region_sum(r_start);
1028 align = pci_region_align(r_start);
1029 r_start->base = ALIGN_DOWN((r_end->base - sum), align);
1030
1031 if ((r_start->base < pcimem_start) ||
1032 (r_start->base > pcimem_end))
1033 // Memory range requested is larger than available.
1034 return -1;
1035 return 0;
1036 }
1037
1038 #define PCI_IO_SHIFT 8
1039 #define PCI_MEMORY_SHIFT 16
1040 #define PCI_PREF_MEMORY_SHIFT 16
1041
1042 static void
pci_region_map_one_entry(struct pci_region_entry * entry,u64 addr)1043 pci_region_map_one_entry(struct pci_region_entry *entry, u64 addr)
1044 {
1045 if (entry->bar >= 0) {
1046 dprintf(1, "PCI: map device bdf=%pP"
1047 " bar %d, addr %08llx, size %08llx [%s]\n",
1048 entry->dev,
1049 entry->bar, addr, entry->size, region_type_name[entry->type]);
1050
1051 pci_set_io_region_addr(entry->dev, entry->bar, addr, entry->is64);
1052 return;
1053 }
1054
1055 u16 bdf = entry->dev->bdf;
1056 u64 limit = addr + entry->size - 1;
1057 if (entry->type == PCI_REGION_TYPE_IO) {
1058 pci_config_writeb(bdf, PCI_IO_BASE, addr >> PCI_IO_SHIFT);
1059 pci_config_writew(bdf, PCI_IO_BASE_UPPER16, 0);
1060 pci_config_writeb(bdf, PCI_IO_LIMIT, limit >> PCI_IO_SHIFT);
1061 pci_config_writew(bdf, PCI_IO_LIMIT_UPPER16, 0);
1062 }
1063 if (entry->type == PCI_REGION_TYPE_MEM) {
1064 pci_config_writew(bdf, PCI_MEMORY_BASE, addr >> PCI_MEMORY_SHIFT);
1065 pci_config_writew(bdf, PCI_MEMORY_LIMIT, limit >> PCI_MEMORY_SHIFT);
1066 }
1067 if (entry->type == PCI_REGION_TYPE_PREFMEM) {
1068 pci_config_writew(bdf, PCI_PREF_MEMORY_BASE, addr >> PCI_PREF_MEMORY_SHIFT);
1069 pci_config_writew(bdf, PCI_PREF_MEMORY_LIMIT, limit >> PCI_PREF_MEMORY_SHIFT);
1070 pci_config_writel(bdf, PCI_PREF_BASE_UPPER32, addr >> 32);
1071 pci_config_writel(bdf, PCI_PREF_LIMIT_UPPER32, limit >> 32);
1072 }
1073 }
1074
pci_region_map_entries(struct pci_bus * busses,struct pci_region * r)1075 static void pci_region_map_entries(struct pci_bus *busses, struct pci_region *r)
1076 {
1077 struct hlist_node *n;
1078 struct pci_region_entry *entry;
1079 hlist_for_each_entry_safe(entry, n, &r->list, node) {
1080 u64 addr = r->base;
1081 r->base += entry->size;
1082 if (entry->bar == -1)
1083 // Update bus base address if entry is a bridge region
1084 busses[entry->dev->secondary_bus].r[entry->type].base = addr;
1085 pci_region_map_one_entry(entry, addr);
1086 hlist_del(&entry->node);
1087 free(entry);
1088 }
1089 }
1090
// Assign addresses to all pci regions and program them into the
// devices/bridges.  Falls back to a 64bit window above 4G when the
// 32bit pci window cannot hold everything.
static void pci_bios_map_devices(struct pci_bus *busses)
{
    if (pci_bios_init_root_regions_io(busses))
        panic("PCI: out of I/O address space\n");

    dprintf(1, "PCI: 32: %016llx - %016llx\n", pcimem_start, pcimem_end);
    if (pci_bios_init_root_regions_mem(busses)) {
        // 32bit window too small - migrate 64bit-capable entries from the
        // root bus regions into temporary regions placed above 4G.
        struct pci_region r64_mem, r64_pref;
        r64_mem.list.first = NULL;
        r64_pref.list.first = NULL;
        pci_region_migrate_64bit_entries(&busses[0].r[PCI_REGION_TYPE_MEM],
                                         &r64_mem);
        pci_region_migrate_64bit_entries(&busses[0].r[PCI_REGION_TYPE_PREFMEM],
                                         &r64_pref);

        // Retry with only the entries that must stay below 4G.
        if (pci_bios_init_root_regions_mem(busses))
            panic("PCI: out of 32bit address space\n");

        u64 sum_mem = pci_region_sum(&r64_mem);
        u64 sum_pref = pci_region_sum(&r64_pref);
        u64 align_mem = pci_region_align(&r64_mem);
        u64 align_pref = pci_region_align(&r64_pref);

        // Start the 64bit window above ram and any firmware reservation,
        // then round up for alignment and 1G hugepage mappings.
        r64_mem.base = le64_to_cpu(romfile_loadint("etc/reserved-memory-end", 0));
        if (r64_mem.base < 0x100000000LL + RamSizeOver4G)
            r64_mem.base = 0x100000000LL + RamSizeOver4G;
        r64_mem.base = ALIGN(r64_mem.base, align_mem);
        r64_mem.base = ALIGN(r64_mem.base, (1LL<<30)); // 1G hugepage
        r64_pref.base = r64_mem.base + sum_mem;
        r64_pref.base = ALIGN(r64_pref.base, align_pref);
        r64_pref.base = ALIGN(r64_pref.base, (1LL<<30)); // 1G hugepage
        pcimem64_start = r64_mem.base;
        pcimem64_end = r64_pref.base + sum_pref;
        pcimem64_end = ALIGN(pcimem64_end, (1LL<<30)); // 1G hugepage
        dprintf(1, "PCI: 64: %016llx - %016llx\n", pcimem64_start, pcimem64_end);

        pci_region_map_entries(busses, &r64_mem);
        pci_region_map_entries(busses, &r64_pref);
    } else {
        // no bars mapped high -> drop 64bit window (see dsdt)
        pcimem64_start = 0;
    }
    // Map regions on each device.  Root bus regions were positioned above;
    // bridge regions get their base set as their parent entry is mapped.
    int bus;
    for (bus = 0; bus<=MaxPCIBus; bus++) {
        int type;
        for (type = 0; type < PCI_REGION_TYPE_COUNT; type++)
            pci_region_map_entries(busses, &busses[bus].r[type]);
    }
}
1141
1142
1143 /****************************************************************
1144 * Main setup code
1145 ****************************************************************/
1146
1147 void
pci_setup(void)1148 pci_setup(void)
1149 {
1150 if (!CONFIG_QEMU)
1151 return;
1152
1153 dprintf(3, "pci setup\n");
1154
1155 dprintf(1, "=== PCI bus & bridge init ===\n");
1156 if (pci_probe_host() != 0) {
1157 return;
1158 }
1159 pci_bios_init_bus();
1160
1161 dprintf(1, "=== PCI device probing ===\n");
1162 pci_probe_devices();
1163
1164 pcimem_start = RamSize;
1165 pci_bios_init_platform();
1166
1167 dprintf(1, "=== PCI new allocation pass #1 ===\n");
1168 struct pci_bus *busses = malloc_tmp(sizeof(*busses) * (MaxPCIBus + 1));
1169 if (!busses) {
1170 warn_noalloc();
1171 return;
1172 }
1173 memset(busses, 0, sizeof(*busses) * (MaxPCIBus + 1));
1174 if (pci_bios_check_devices(busses))
1175 return;
1176
1177 dprintf(1, "=== PCI new allocation pass #2 ===\n");
1178 pci_bios_map_devices(busses);
1179
1180 pci_bios_init_devices();
1181
1182 free(busses);
1183
1184 pci_enable_default_vga();
1185 }
1186