1 /*
2 * QEMU PCI bus manager
3 *
4 * Copyright (c) 2004 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "qemu/units.h"
28 #include "hw/irq.h"
29 #include "hw/pci/pci.h"
30 #include "hw/pci/pci_bridge.h"
31 #include "hw/pci/pci_bus.h"
32 #include "hw/pci/pci_host.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/qdev-properties-system.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "net/net.h"
38 #include "sysemu/numa.h"
39 #include "sysemu/runstate.h"
40 #include "sysemu/sysemu.h"
41 #include "hw/loader.h"
42 #include "qemu/error-report.h"
43 #include "qemu/range.h"
44 #include "trace.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "hw/hotplug.h"
48 #include "hw/boards.h"
49 #include "qapi/error.h"
50 #include "qemu/cutils.h"
51 #include "pci-internal.h"
52
53 #include "hw/xen/xen.h"
54 #include "hw/i386/kvm/xen_evtchn.h"
55
56 //#define DEBUG_PCI
57 #ifdef DEBUG_PCI
58 # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
59 #else
60 # define PCI_DPRINTF(format, ...) do { } while (0)
61 #endif
62
63 bool pci_available = true;
64
65 static char *pcibus_get_dev_path(DeviceState *dev);
66 static char *pcibus_get_fw_dev_path(DeviceState *dev);
67 static void pcibus_reset_hold(Object *obj, ResetType type);
68 static bool pcie_has_upstream_port(PCIDevice *dev);
69
static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
71 void *opaque, Error **errp)
72 {
73 uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));
74
75 visit_type_uint8(v, name, &busnr, errp);
76 }
77
78 static const PropertyInfo prop_pci_busnr = {
79 .name = "busnr",
80 .get = prop_pci_busnr_get,
81 };
82
83 static Property pci_props[] = {
84 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
85 DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
86 DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
87 DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
88 DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
89 QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
90 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
91 QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
92 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
93 QEMU_PCIE_EXTCAP_INIT_BITNR, true),
94 DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
95 failover_pair_id),
96 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
97 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
98 QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
99 DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
100 QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
101 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
102 max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
103 DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
104 QEMU_PCIE_EXT_TAG_BITNR, true),
105 { .name = "busnr", .info = &prop_pci_busnr },
106 DEFINE_PROP_END_OF_LIST()
107 };
108
109 static const VMStateDescription vmstate_pcibus = {
110 .name = "PCIBUS",
111 .version_id = 1,
112 .minimum_version_id = 1,
113 .fields = (const VMStateField[]) {
114 VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
115 VMSTATE_VARRAY_INT32(irq_count, PCIBus,
116 nirq, 0, vmstate_info_int32,
117 int32_t),
118 VMSTATE_END_OF_LIST()
119 }
120 };
121
static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
123 {
124 return a - b;
125 }
126
static GSequence *pci_acpi_index_list(void)
128 {
129 static GSequence *used_acpi_index_list;
130
131 if (!used_acpi_index_list) {
132 used_acpi_index_list = g_sequence_new(NULL);
133 }
134 return used_acpi_index_list;
135 }
136
static void pci_init_bus_master(PCIDevice *pci_dev)
138 {
139 AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
140
141 memory_region_init_alias(&pci_dev->bus_master_enable_region,
142 OBJECT(pci_dev), "bus master",
143 dma_as->root, 0, memory_region_size(dma_as->root));
144 memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
145 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
146 &pci_dev->bus_master_enable_region);
147 }
148
static void pcibus_machine_done(Notifier *notifier, void *data)
150 {
151 PCIBus *bus = container_of(notifier, PCIBus, machine_done);
152 int i;
153
154 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
155 if (bus->devices[i]) {
156 pci_init_bus_master(bus->devices[i]);
157 }
158 }
159 }
160
static void pci_bus_realize(BusState *qbus, Error **errp)
162 {
163 PCIBus *bus = PCI_BUS(qbus);
164
165 bus->machine_done.notify = pcibus_machine_done;
166 qemu_add_machine_init_done_notifier(&bus->machine_done);
167
168 vmstate_register_any(NULL, &vmstate_pcibus, bus);
169 }
170
static void pcie_bus_realize(BusState *qbus, Error **errp)
172 {
173 PCIBus *bus = PCI_BUS(qbus);
174 Error *local_err = NULL;
175
176 pci_bus_realize(qbus, &local_err);
177 if (local_err) {
178 error_propagate(errp, local_err);
179 return;
180 }
181
182 /*
183 * A PCI-E bus can support extended config space if it's the root
184 * bus, or if the bus/bridge above it does as well
185 */
186 if (pci_bus_is_root(bus)) {
187 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
188 } else {
189 PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
190
191 if (pci_bus_allows_extended_config_space(parent_bus)) {
192 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
193 }
194 }
195 }
196
static void pci_bus_unrealize(BusState *qbus)
198 {
199 PCIBus *bus = PCI_BUS(qbus);
200
201 qemu_remove_machine_init_done_notifier(&bus->machine_done);
202
203 vmstate_unregister(NULL, &vmstate_pcibus, bus);
204 }
205
static int pcibus_num(PCIBus *bus)
207 {
208 if (pci_bus_is_root(bus)) {
209 return 0; /* pci host bridge */
210 }
211 return bus->parent_dev->config[PCI_SECONDARY_BUS];
212 }
213
static uint16_t pcibus_numa_node(PCIBus *bus)
215 {
216 return NUMA_NODE_UNASSIGNED;
217 }
218
static void pci_bus_class_init(ObjectClass *klass, void *data)
220 {
221 BusClass *k = BUS_CLASS(klass);
222 PCIBusClass *pbc = PCI_BUS_CLASS(klass);
223 ResettableClass *rc = RESETTABLE_CLASS(klass);
224
225 k->print_dev = pcibus_dev_print;
226 k->get_dev_path = pcibus_get_dev_path;
227 k->get_fw_dev_path = pcibus_get_fw_dev_path;
228 k->realize = pci_bus_realize;
229 k->unrealize = pci_bus_unrealize;
230
231 rc->phases.hold = pcibus_reset_hold;
232
233 pbc->bus_num = pcibus_num;
234 pbc->numa_node = pcibus_numa_node;
235 }
236
237 static const TypeInfo pci_bus_info = {
238 .name = TYPE_PCI_BUS,
239 .parent = TYPE_BUS,
240 .instance_size = sizeof(PCIBus),
241 .class_size = sizeof(PCIBusClass),
242 .class_init = pci_bus_class_init,
243 };
244
245 static const TypeInfo cxl_interface_info = {
246 .name = INTERFACE_CXL_DEVICE,
247 .parent = TYPE_INTERFACE,
248 };
249
250 static const TypeInfo pcie_interface_info = {
251 .name = INTERFACE_PCIE_DEVICE,
252 .parent = TYPE_INTERFACE,
253 };
254
255 static const TypeInfo conventional_pci_interface_info = {
256 .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
257 .parent = TYPE_INTERFACE,
258 };
259
static void pcie_bus_class_init(ObjectClass *klass, void *data)
261 {
262 BusClass *k = BUS_CLASS(klass);
263
264 k->realize = pcie_bus_realize;
265 }
266
267 static const TypeInfo pcie_bus_info = {
268 .name = TYPE_PCIE_BUS,
269 .parent = TYPE_PCI_BUS,
270 .class_init = pcie_bus_class_init,
271 };
272
273 static const TypeInfo cxl_bus_info = {
274 .name = TYPE_CXL_BUS,
275 .parent = TYPE_PCIE_BUS,
276 .class_init = pcie_bus_class_init,
277 };
278
279 static void pci_update_mappings(PCIDevice *d);
280 static void pci_irq_handler(void *opaque, int irq_num, int level);
281 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
282 static void pci_del_option_rom(PCIDevice *pdev);
283
284 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
285 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
286
287 PCIHostStateList pci_host_bridges;
288
int pci_bar(PCIDevice *d, int reg)
290 {
291 uint8_t type;
292
293 /* PCIe virtual functions do not have their own BARs */
294 assert(!pci_is_vf(d));
295
296 if (reg != PCI_ROM_SLOT)
297 return PCI_BASE_ADDRESS_0 + reg * 4;
298
299 type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
300 return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
301 }
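
/*
 * Illustrative example (not used by the code above): for a type 0 header,
 * BAR 2 lives at config offset PCI_BASE_ADDRESS_0 + 2 * 4 = 0x10 + 8 = 0x18,
 * while the expansion ROM BAR (PCI_ROM_SLOT) is at PCI_ROM_ADDRESS (0x30)
 * for type 0 headers and at PCI_ROM_ADDRESS1 (0x38) for type 1 (bridge)
 * headers, as returned by pci_bar() above.
 */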
302
static inline int pci_irq_state(PCIDevice *d, int irq_num)
304 {
305 return (d->irq_state >> irq_num) & 0x1;
306 }
307
static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
309 {
310 d->irq_state &= ~(0x1 << irq_num);
311 d->irq_state |= level << irq_num;
312 }
313
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
315 {
316 assert(irq_num >= 0);
317 assert(irq_num < bus->nirq);
318 bus->irq_count[irq_num] += change;
319 bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
320 }
321
static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
323 {
324 PCIBus *bus;
325 for (;;) {
326 int dev_irq = irq_num;
327 bus = pci_get_bus(pci_dev);
328 assert(bus->map_irq);
329 irq_num = bus->map_irq(pci_dev, irq_num);
330 trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
331 pci_bus_is_root(bus) ? "root-complex"
332 : DEVICE(bus->parent_dev)->canonical_path);
333 if (bus->set_irq)
334 break;
335 pci_dev = bus->parent_dev;
336 }
337 pci_bus_change_irq_level(bus, irq_num, change);
338 }
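
/*
 * Illustrative walk (assuming a device directly behind one PCI-to-PCI
 * bridge): the device's pin is remapped by its bus's map_irq (often the
 * swizzle helper further below); that bus has no set_irq of its own, so the
 * loop climbs to the bridge device and remaps again, until it reaches a bus
 * (typically the root bus) that does provide set_irq, where irq_count[] is
 * adjusted and the line is driven as a wired-OR of all contributors.
 */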
339
int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
341 {
342 assert(irq_num >= 0);
343 assert(irq_num < bus->nirq);
344 return !!bus->irq_count[irq_num];
345 }
346
347 /* Update interrupt status bit in config space on interrupt
348 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
350 {
351 if (dev->irq_state) {
352 dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
353 } else {
354 dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
355 }
356 }
357
void pci_device_deassert_intx(PCIDevice *dev)
359 {
360 int i;
361 for (i = 0; i < PCI_NUM_PINS; ++i) {
362 pci_irq_handler(dev, i, 0);
363 }
364 }
365
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
367 {
368 MemTxAttrs attrs = {};
369
370 /*
371 * Xen uses the high bits of the address to contain some of the bits
372 * of the PIRQ#. Therefore we can't just send the write cycle and
373 * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
375 * So we intercept the delivery here instead of in kvm_send_msi().
376 */
377 if (xen_mode == XEN_EMULATE &&
378 xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
379 return;
380 }
381 attrs.requester_id = pci_requester_id(dev);
382 address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
383 attrs, NULL);
384 }
385
static void pci_reset_regions(PCIDevice *dev)
387 {
388 int r;
389 if (pci_is_vf(dev)) {
390 return;
391 }
392
393 for (r = 0; r < PCI_NUM_REGIONS; ++r) {
394 PCIIORegion *region = &dev->io_regions[r];
395 if (!region->size) {
396 continue;
397 }
398
399 if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
400 region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
401 pci_set_quad(dev->config + pci_bar(dev, r), region->type);
402 } else {
403 pci_set_long(dev->config + pci_bar(dev, r), region->type);
404 }
405 }
406 }
407
static void pci_do_device_reset(PCIDevice *dev)
409 {
410 pci_device_deassert_intx(dev);
411 assert(dev->irq_state == 0);
412
413 /* Clear all writable bits */
414 pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
415 pci_get_word(dev->wmask + PCI_COMMAND) |
416 pci_get_word(dev->w1cmask + PCI_COMMAND));
417 pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
418 pci_get_word(dev->wmask + PCI_STATUS) |
419 pci_get_word(dev->w1cmask + PCI_STATUS));
420 /* Some devices make bits of PCI_INTERRUPT_LINE read only */
421 pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
422 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
423 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
424 dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
425 pci_reset_regions(dev);
426 pci_update_mappings(dev);
427
428 msi_reset(dev);
429 msix_reset(dev);
430 pcie_sriov_pf_reset(dev);
431 }
432
433 /*
434 * This function is called on #RST and FLR.
435 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
436 */
void pci_device_reset(PCIDevice *dev)
438 {
439 device_cold_reset(&dev->qdev);
440 pci_do_device_reset(dev);
441 }
442
443 /*
444 * Trigger pci bus reset under a given bus.
445 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been reset with device_cold_reset().
447 */
static void pcibus_reset_hold(Object *obj, ResetType type)
449 {
450 PCIBus *bus = PCI_BUS(obj);
451 int i;
452
453 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
454 if (bus->devices[i]) {
455 pci_do_device_reset(bus->devices[i]);
456 }
457 }
458
459 for (i = 0; i < bus->nirq; i++) {
460 assert(bus->irq_count[i] == 0);
461 }
462 }
463
static void pci_host_bus_register(DeviceState *host)
465 {
466 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
467
468 QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
469 }
470
static void pci_host_bus_unregister(DeviceState *host)
472 {
473 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
474
475 QLIST_REMOVE(host_bridge, next);
476 }
477
PCIBus *pci_device_root_bus(const PCIDevice *d)
479 {
480 PCIBus *bus = pci_get_bus(d);
481
482 while (!pci_bus_is_root(bus)) {
483 d = bus->parent_dev;
484 assert(d != NULL);
485
486 bus = pci_get_bus(d);
487 }
488
489 return bus;
490 }
491
const char *pci_root_bus_path(PCIDevice *dev)
493 {
494 PCIBus *rootbus = pci_device_root_bus(dev);
495 PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
496 PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
497
498 assert(host_bridge->bus == rootbus);
499
500 if (hc->root_bus_path) {
501 return (*hc->root_bus_path)(host_bridge, rootbus);
502 }
503
504 return rootbus->qbus.name;
505 }
506
bool pci_bus_bypass_iommu(PCIBus *bus)
508 {
509 PCIBus *rootbus = bus;
510 PCIHostState *host_bridge;
511
512 if (!pci_bus_is_root(bus)) {
513 rootbus = pci_device_root_bus(bus->parent_dev);
514 }
515
516 host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
517
518 assert(host_bridge->bus == rootbus);
519
520 return host_bridge->bypass_iommu;
521 }
522
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
524 MemoryRegion *mem, MemoryRegion *io,
525 uint8_t devfn_min)
526 {
527 assert(PCI_FUNC(devfn_min) == 0);
528 bus->devfn_min = devfn_min;
529 bus->slot_reserved_mask = 0x0;
530 bus->address_space_mem = mem;
531 bus->address_space_io = io;
532 bus->flags |= PCI_BUS_IS_ROOT;
533
534 /* host bridge */
535 QLIST_INIT(&bus->child);
536
537 pci_host_bus_register(parent);
538 }
539
static void pci_bus_uninit(PCIBus *bus)
541 {
542 pci_host_bus_unregister(BUS(bus)->parent);
543 }
544
bool pci_bus_is_express(const PCIBus *bus)
546 {
547 return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
548 }
549
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
551 const char *name,
552 MemoryRegion *mem, MemoryRegion *io,
553 uint8_t devfn_min, const char *typename)
554 {
555 qbus_init(bus, bus_size, typename, parent, name);
556 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
557 }
558
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
560 MemoryRegion *mem, MemoryRegion *io,
561 uint8_t devfn_min, const char *typename)
562 {
563 PCIBus *bus;
564
565 bus = PCI_BUS(qbus_new(typename, parent, name));
566 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
567 return bus;
568 }
569
void pci_root_bus_cleanup(PCIBus *bus)
571 {
572 pci_bus_uninit(bus);
573 /* the caller of the unplug hotplug handler will delete this device */
574 qbus_unrealize(BUS(bus));
575 }
576
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
578 void *irq_opaque, int nirq)
579 {
580 bus->set_irq = set_irq;
581 bus->irq_opaque = irq_opaque;
582 bus->nirq = nirq;
583 g_free(bus->irq_count);
584 bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
585 }
586
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
588 {
589 bus->map_irq = map_irq;
590 }
591
void pci_bus_irqs_cleanup(PCIBus *bus)
593 {
594 bus->set_irq = NULL;
595 bus->map_irq = NULL;
596 bus->irq_opaque = NULL;
597 bus->nirq = 0;
598 g_free(bus->irq_count);
599 bus->irq_count = NULL;
600 }
601
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
603 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
604 void *irq_opaque,
605 MemoryRegion *mem, MemoryRegion *io,
606 uint8_t devfn_min, int nirq,
607 const char *typename)
608 {
609 PCIBus *bus;
610
611 bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
612 pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
613 pci_bus_map_irqs(bus, map_irq);
614 return bus;
615 }
616
void pci_unregister_root_bus(PCIBus *bus)
618 {
619 pci_bus_irqs_cleanup(bus);
620 pci_root_bus_cleanup(bus);
621 }
622
int pci_bus_num(PCIBus *s)
624 {
625 return PCI_BUS_GET_CLASS(s)->bus_num(s);
626 }
627
628 /* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
630 {
631 int i;
632 *min_bus = *max_bus = pci_bus_num(bus);
633
634 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
635 PCIDevice *dev = bus->devices[i];
636
637 if (dev && IS_PCI_BRIDGE(dev)) {
638 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
639 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
640 }
641 }
642 }
643
int pci_bus_numa_node(PCIBus *bus)
645 {
646 return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
647 }
648
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
650 const VMStateField *field)
651 {
652 PCIDevice *s = container_of(pv, PCIDevice, config);
653 uint8_t *config;
654 int i;
655
656 assert(size == pci_config_size(s));
657 config = g_malloc(size);
658
659 qemu_get_buffer(f, config, size);
660 for (i = 0; i < size; ++i) {
661 if ((config[i] ^ s->config[i]) &
662 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
663 error_report("%s: Bad config data: i=0x%x read: %x device: %x "
664 "cmask: %x wmask: %x w1cmask:%x", __func__,
665 i, config[i], s->config[i],
666 s->cmask[i], s->wmask[i], s->w1cmask[i]);
667 g_free(config);
668 return -EINVAL;
669 }
670 }
671 memcpy(s->config, config, size);
672
673 pci_update_mappings(s);
674 if (IS_PCI_BRIDGE(s)) {
675 pci_bridge_update_mappings(PCI_BRIDGE(s));
676 }
677
678 memory_region_set_enabled(&s->bus_master_enable_region,
679 pci_get_word(s->config + PCI_COMMAND)
680 & PCI_COMMAND_MASTER);
681
682 g_free(config);
683 return 0;
684 }
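
/*
 * Illustrative example: cmask covers fields such as the vendor and device
 * IDs, so an incoming config blob whose vendor ID differs from the local
 * device's (and which is neither writable nor write-1-to-clear) makes the
 * check above fail and the migration stream is rejected with -EINVAL.
 */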
685
686 /* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
688 const VMStateField *field, JSONWriter *vmdesc)
689 {
690 const uint8_t **v = pv;
691 assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
692 qemu_put_buffer(f, *v, size);
693
694 return 0;
695 }
696
697 static const VMStateInfo vmstate_info_pci_config = {
698 .name = "pci config",
699 .get = get_pci_config_device,
700 .put = put_pci_config_device,
701 };
702
static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
704 const VMStateField *field)
705 {
706 PCIDevice *s = container_of(pv, PCIDevice, irq_state);
707 uint32_t irq_state[PCI_NUM_PINS];
708 int i;
709 for (i = 0; i < PCI_NUM_PINS; ++i) {
710 irq_state[i] = qemu_get_be32(f);
711 if (irq_state[i] != 0x1 && irq_state[i] != 0) {
712 fprintf(stderr, "irq state %d: must be 0 or 1.\n",
713 irq_state[i]);
714 return -EINVAL;
715 }
716 }
717
718 for (i = 0; i < PCI_NUM_PINS; ++i) {
719 pci_set_irq_state(s, i, irq_state[i]);
720 }
721
722 return 0;
723 }
724
static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
726 const VMStateField *field, JSONWriter *vmdesc)
727 {
728 int i;
729 PCIDevice *s = container_of(pv, PCIDevice, irq_state);
730
731 for (i = 0; i < PCI_NUM_PINS; ++i) {
732 qemu_put_be32(f, pci_irq_state(s, i));
733 }
734
735 return 0;
736 }
737
738 static const VMStateInfo vmstate_info_pci_irq_state = {
739 .name = "pci irq state",
740 .get = get_pci_irq_state,
741 .put = put_pci_irq_state,
742 };
743
static bool migrate_is_pcie(void *opaque, int version_id)
745 {
746 return pci_is_express((PCIDevice *)opaque);
747 }
748
static bool migrate_is_not_pcie(void *opaque, int version_id)
750 {
751 return !pci_is_express((PCIDevice *)opaque);
752 }
753
754 const VMStateDescription vmstate_pci_device = {
755 .name = "PCIDevice",
756 .version_id = 2,
757 .minimum_version_id = 1,
758 .fields = (const VMStateField[]) {
759 VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
760 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
761 migrate_is_not_pcie,
762 0, vmstate_info_pci_config,
763 PCI_CONFIG_SPACE_SIZE),
764 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
765 migrate_is_pcie,
766 0, vmstate_info_pci_config,
767 PCIE_CONFIG_SPACE_SIZE),
768 VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
769 vmstate_info_pci_irq_state,
770 PCI_NUM_PINS * sizeof(int32_t)),
771 VMSTATE_END_OF_LIST()
772 }
773 };
774
775
void pci_device_save(PCIDevice *s, QEMUFile *f)
777 {
778 /* Clear interrupt status bit: it is implicit
779 * in irq_state which we are saving.
780 * This makes us compatible with old devices
781 * which never set or clear this bit. */
782 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
783 vmstate_save_state(f, &vmstate_pci_device, s, NULL);
784 /* Restore the interrupt status bit. */
785 pci_update_irq_status(s);
786 }
787
int pci_device_load(PCIDevice *s, QEMUFile *f)
789 {
790 int ret;
791 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
792 /* Restore the interrupt status bit. */
793 pci_update_irq_status(s);
794 return ret;
795 }
796
static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
798 {
799 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
800 pci_default_sub_vendor_id);
801 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
802 pci_default_sub_device_id);
803 }
804
805 /*
 * Parse [[<domain>:]<bus>:]<slot> when funcp == NULL, or
 *       [[<domain>:]<bus>:]<slot>.<func> otherwise.
 * Returns -1 on error.
808 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
810 unsigned int *slotp, unsigned int *funcp)
811 {
812 const char *p;
813 char *e;
814 unsigned long val;
815 unsigned long dom = 0, bus = 0;
816 unsigned int slot = 0;
817 unsigned int func = 0;
818
819 p = addr;
820 val = strtoul(p, &e, 16);
821 if (e == p)
822 return -1;
823 if (*e == ':') {
824 bus = val;
825 p = e + 1;
826 val = strtoul(p, &e, 16);
827 if (e == p)
828 return -1;
829 if (*e == ':') {
830 dom = bus;
831 bus = val;
832 p = e + 1;
833 val = strtoul(p, &e, 16);
834 if (e == p)
835 return -1;
836 }
837 }
838
839 slot = val;
840
841 if (funcp != NULL) {
842 if (*e != '.')
843 return -1;
844
845 p = e + 1;
846 val = strtoul(p, &e, 16);
847 if (e == p)
848 return -1;
849
850 func = val;
851 }
852
853 /* if funcp == NULL func is 0 */
854 if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
855 return -1;
856
857 if (*e)
858 return -1;
859
860 *domp = dom;
861 *busp = bus;
862 *slotp = slot;
863 if (funcp != NULL)
864 *funcp = func;
865 return 0;
866 }
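
/*
 * Illustrative examples (assumed inputs): "0000:00:1f.2" parses to domain 0,
 * bus 0, slot 0x1f, function 2; "04.0" parses to slot 4, function 0 with
 * domain and bus defaulting to 0; "20.0" fails because slot 0x20 > 0x1f.
 */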
867
static void pci_init_cmask(PCIDevice *dev)
869 {
870 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
871 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
872 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
873 dev->cmask[PCI_REVISION_ID] = 0xff;
874 dev->cmask[PCI_CLASS_PROG] = 0xff;
875 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
876 dev->cmask[PCI_HEADER_TYPE] = 0xff;
877 dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
878 }
879
static void pci_init_wmask(PCIDevice *dev)
881 {
882 int config_size = pci_config_size(dev);
883
884 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
885 dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
886 pci_set_word(dev->wmask + PCI_COMMAND,
887 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
888 PCI_COMMAND_INTX_DISABLE);
889 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
890
891 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
892 config_size - PCI_CONFIG_HEADER_SIZE);
893 }
894
static void pci_init_w1cmask(PCIDevice *dev)
896 {
897 /*
898 * Note: It's okay to set w1cmask even for readonly bits as
899 * long as their value is hardwired to 0.
900 */
901 pci_set_word(dev->w1cmask + PCI_STATUS,
902 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
903 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
904 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
905 }
906
static void pci_init_mask_bridge(PCIDevice *d)
908 {
909 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
910 PCI_SEC_LATENCY_TIMER */
911 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);
912
913 /* base and limit */
914 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
915 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
916 pci_set_word(d->wmask + PCI_MEMORY_BASE,
917 PCI_MEMORY_RANGE_MASK & 0xffff);
918 pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
919 PCI_MEMORY_RANGE_MASK & 0xffff);
920 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
921 PCI_PREF_RANGE_MASK & 0xffff);
922 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
923 PCI_PREF_RANGE_MASK & 0xffff);
924
925 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
926 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);
927
928 /* Supported memory and i/o types */
929 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
930 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
931 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
932 PCI_PREF_RANGE_TYPE_64);
933 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
934 PCI_PREF_RANGE_TYPE_64);
935
936 /*
937 * TODO: Bridges default to 10-bit VGA decoding but we currently only
938 * implement 16-bit decoding (no alias support).
939 */
940 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
941 PCI_BRIDGE_CTL_PARITY |
942 PCI_BRIDGE_CTL_SERR |
943 PCI_BRIDGE_CTL_ISA |
944 PCI_BRIDGE_CTL_VGA |
945 PCI_BRIDGE_CTL_VGA_16BIT |
946 PCI_BRIDGE_CTL_MASTER_ABORT |
947 PCI_BRIDGE_CTL_BUS_RESET |
948 PCI_BRIDGE_CTL_FAST_BACK |
949 PCI_BRIDGE_CTL_DISCARD |
950 PCI_BRIDGE_CTL_SEC_DISCARD |
951 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* The following has no effect, as we never set this bit; it is kept
     * here for completeness. */
954 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
955 PCI_BRIDGE_CTL_DISCARD_STATUS);
956 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
957 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
958 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
959 PCI_PREF_RANGE_TYPE_MASK);
960 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
961 PCI_PREF_RANGE_TYPE_MASK);
962 }
963
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
965 {
966 uint8_t slot = PCI_SLOT(dev->devfn);
967 uint8_t func;
968
969 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
970 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
971 }
972
973 /*
974 * With SR/IOV and ARI, a device at function 0 need not be a multifunction
975 * device, as it may just be a VF that ended up with function 0 in
976 * the legacy PCI interpretation. Avoid failing in such cases:
977 */
978 if (pci_is_vf(dev) &&
979 dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
980 return;
981 }
982
    /*
     * The multifunction bit is interpreted in two ways:
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the other functions (> 0)
     *     are allowed to leave the bit at 0.
     *     Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10
     *
     * So the OS (at least Linux) checks the bit only on function 0
     * and ignores the bit on functions > 0.
     *
     * The check below allows both interpretations.
     */
996 if (PCI_FUNC(dev->devfn)) {
997 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
998 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
999 /* function 0 should set multifunction bit */
1000 error_setg(errp, "PCI: single function device can't be populated "
1001 "in function %x.%x", slot, PCI_FUNC(dev->devfn));
1002 return;
1003 }
1004 return;
1005 }
1006
1007 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1008 return;
1009 }
1010 /* function 0 indicates single function, so function > 0 must be NULL */
1011 for (func = 1; func < PCI_FUNC_MAX; ++func) {
1012 if (bus->devices[PCI_DEVFN(slot, func)]) {
1013 error_setg(errp, "PCI: %x.0 indicates single function, "
1014 "but %x.%x is already populated.",
1015 slot, slot, func);
1016 return;
1017 }
1018 }
1019 }
1020
static void pci_config_alloc(PCIDevice *pci_dev)
1022 {
1023 int config_size = pci_config_size(pci_dev);
1024
1025 pci_dev->config = g_malloc0(config_size);
1026 pci_dev->cmask = g_malloc0(config_size);
1027 pci_dev->wmask = g_malloc0(config_size);
1028 pci_dev->w1cmask = g_malloc0(config_size);
1029 pci_dev->used = g_malloc0(config_size);
1030 }
1031
static void pci_config_free(PCIDevice *pci_dev)
1033 {
1034 g_free(pci_dev->config);
1035 g_free(pci_dev->cmask);
1036 g_free(pci_dev->wmask);
1037 g_free(pci_dev->w1cmask);
1038 g_free(pci_dev->used);
1039 }
1040
static void do_pci_unregister_device(PCIDevice *pci_dev)
1042 {
1043 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
1044 pci_config_free(pci_dev);
1045
1046 if (xen_mode == XEN_EMULATE) {
1047 xen_evtchn_remove_pci_device(pci_dev);
1048 }
1049 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
1050 memory_region_del_subregion(&pci_dev->bus_master_container_region,
1051 &pci_dev->bus_master_enable_region);
1052 }
1053 address_space_destroy(&pci_dev->bus_master_as);
1054 }
1055
1056 /* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
1058 {
1059 uint8_t bus_n;
1060 uint16_t result;
1061
1062 switch (cache->type) {
1063 case PCI_REQ_ID_BDF:
1064 result = pci_get_bdf(cache->dev);
1065 break;
1066 case PCI_REQ_ID_SECONDARY_BUS:
1067 bus_n = pci_dev_bus_num(cache->dev);
1068 result = PCI_BUILD_BDF(bus_n, 0);
1069 break;
1070 default:
1071 error_report("Invalid PCI requester ID cache type: %d",
1072 cache->type);
1073 exit(1);
1074 break;
1075 }
1076
1077 return result;
1078 }
1079
1080 /* Parse bridges up to the root complex and return requester ID
1081 * cache for specific device. For full PCIe topology, the cache
1082 * result would be exactly the same as getting BDF of the device.
 * However, several tricks are required when the system mixes in
 * legacy PCI devices and PCIe-to-PCI bridges.
1085 *
1086 * Here we cache the proxy device (and type) not requester ID since
1087 * bus number might change from time to time.
1088 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
1090 {
1091 PCIDevice *parent;
1092 PCIReqIDCache cache = {
1093 .dev = dev,
1094 .type = PCI_REQ_ID_BDF,
1095 };
1096
1097 while (!pci_bus_is_root(pci_get_bus(dev))) {
1098 /* We are under PCI/PCIe bridges */
1099 parent = pci_get_bus(dev)->parent_dev;
1100 if (pci_is_express(parent)) {
1101 if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
1102 /* When we pass through PCIe-to-PCI/PCIX bridges, we
1103 * override the requester ID using secondary bus
1104 * number of parent bridge with zeroed devfn
1105 * (pcie-to-pci bridge spec chap 2.3). */
1106 cache.type = PCI_REQ_ID_SECONDARY_BUS;
1107 cache.dev = dev;
1108 }
1109 } else {
1110 /* Legacy PCI, override requester ID with the bridge's
1111 * BDF upstream. When the root complex connects to
1112 * legacy PCI devices (including buses), it can only
1113 * obtain requester ID info from directly attached
1114 * devices. If devices are attached under bridges, only
1115 * the requester ID of the bridge that is directly
1116 * attached to the root complex can be recognized. */
1117 cache.type = PCI_REQ_ID_BDF;
1118 cache.dev = parent;
1119 }
1120 dev = parent;
1121 }
1122
1123 return cache;
1124 }
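
/*
 * Illustrative example: a conventional PCI device at 02:03.0 sitting
 * directly behind a PCIe-to-PCI bridge (itself plugged into a root port) is
 * cached with type PCI_REQ_ID_SECONDARY_BUS, so pci_requester_id() below
 * reports bus 2, devfn 0 (i.e. 02:00.0) rather than the device's own BDF.
 */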
1125
uint16_t pci_requester_id(PCIDevice *dev)
1127 {
1128 return pci_req_id_cache_extract(&dev->requester_id_cache);
1129 }
1130
static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
1132 {
1133 return !(bus->devices[devfn]);
1134 }
1135
static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
1137 {
1138 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
1139 }
1140
uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
1142 {
1143 return bus->slot_reserved_mask;
1144 }
1145
void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1147 {
1148 bus->slot_reserved_mask |= mask;
1149 }
1150
void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1152 {
1153 bus->slot_reserved_mask &= ~mask;
1154 }
1155
1156 /* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
1158 const char *name, int devfn,
1159 Error **errp)
1160 {
1161 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1162 PCIConfigReadFunc *config_read = pc->config_read;
1163 PCIConfigWriteFunc *config_write = pc->config_write;
1164 Error *local_err = NULL;
1165 DeviceState *dev = DEVICE(pci_dev);
1166 PCIBus *bus = pci_get_bus(pci_dev);
1167 bool is_bridge = IS_PCI_BRIDGE(pci_dev);
1168
1169 /* Only pci bridges can be attached to extra PCI root buses */
1170 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
1171 error_setg(errp,
1172 "PCI: Only PCI/PCIe bridges can be plugged into %s",
1173 bus->parent_dev->name);
1174 return NULL;
1175 }
1176
1177 if (devfn < 0) {
1178 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
1179 devfn += PCI_FUNC_MAX) {
1180 if (pci_bus_devfn_available(bus, devfn) &&
1181 !pci_bus_devfn_reserved(bus, devfn)) {
1182 goto found;
1183 }
1184 }
1185 error_setg(errp, "PCI: no slot/function available for %s, all in use "
1186 "or reserved", name);
1187 return NULL;
1188 found: ;
1189 } else if (pci_bus_devfn_reserved(bus, devfn)) {
1190 error_setg(errp, "PCI: slot %d function %d not available for %s,"
1191 " reserved",
1192 PCI_SLOT(devfn), PCI_FUNC(devfn), name);
1193 return NULL;
1194 } else if (!pci_bus_devfn_available(bus, devfn)) {
1195 error_setg(errp, "PCI: slot %d function %d not available for %s,"
1196 " in use by %s,id=%s",
1197 PCI_SLOT(devfn), PCI_FUNC(devfn), name,
1198 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
1199 return NULL;
1200 }
1201
1202 /*
1203 * Populating function 0 triggers a scan from the guest that
1204 * exposes other non-zero functions. Hence we need to ensure that
1205 * function 0 wasn't added yet.
1206 */
1207 if (dev->hotplugged && !pci_is_vf(pci_dev) &&
1208 pci_get_function_0(pci_dev)) {
1209 error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1210 " new func %s cannot be exposed to guest.",
1211 PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
1212 pci_get_function_0(pci_dev)->name,
1213 name);
1214
1215 return NULL;
1216 }
1217
1218 pci_dev->devfn = devfn;
1219 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
1220 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
1221
1222 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
1223 "bus master container", UINT64_MAX);
1224 address_space_init(&pci_dev->bus_master_as,
1225 &pci_dev->bus_master_container_region, pci_dev->name);
1226 pci_dev->bus_master_as.max_bounce_buffer_size =
1227 pci_dev->max_bounce_buffer_size;
1228
1229 if (phase_check(PHASE_MACHINE_READY)) {
1230 pci_init_bus_master(pci_dev);
1231 }
1232 pci_dev->irq_state = 0;
1233 pci_config_alloc(pci_dev);
1234
1235 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
1236 pci_config_set_device_id(pci_dev->config, pc->device_id);
1237 pci_config_set_revision(pci_dev->config, pc->revision);
1238 pci_config_set_class(pci_dev->config, pc->class_id);
1239
1240 if (!is_bridge) {
1241 if (pc->subsystem_vendor_id || pc->subsystem_id) {
1242 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
1243 pc->subsystem_vendor_id);
1244 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
1245 pc->subsystem_id);
1246 } else {
1247 pci_set_default_subsystem_id(pci_dev);
1248 }
1249 } else {
1250 /* subsystem_vendor_id/subsystem_id are only for header type 0 */
1251 assert(!pc->subsystem_vendor_id);
1252 assert(!pc->subsystem_id);
1253 }
1254 pci_init_cmask(pci_dev);
1255 pci_init_wmask(pci_dev);
1256 pci_init_w1cmask(pci_dev);
1257 if (is_bridge) {
1258 pci_init_mask_bridge(pci_dev);
1259 }
1260 pci_init_multifunction(bus, pci_dev, &local_err);
1261 if (local_err) {
1262 error_propagate(errp, local_err);
1263 do_pci_unregister_device(pci_dev);
1264 return NULL;
1265 }
1266
1267 if (!config_read)
1268 config_read = pci_default_read_config;
1269 if (!config_write)
1270 config_write = pci_default_write_config;
1271 pci_dev->config_read = config_read;
1272 pci_dev->config_write = config_write;
1273 bus->devices[devfn] = pci_dev;
1274 pci_dev->version_id = 2; /* Current pci device vmstate version */
1275 return pci_dev;
1276 }
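
/*
 * Illustrative example: with devfn == -1 on a bus whose devfn_min is 0, the
 * loop above probes devfn 0x00, 0x08, 0x10, ... (one slot, i.e. PCI_FUNC_MAX
 * functions, apart) and picks the first slot that is neither occupied nor
 * covered by slot_reserved_mask.
 */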
1277
static void pci_unregister_io_regions(PCIDevice *pci_dev)
1279 {
1280 PCIIORegion *r;
1281 int i;
1282
1283 for(i = 0; i < PCI_NUM_REGIONS; i++) {
1284 r = &pci_dev->io_regions[i];
1285 if (!r->size || r->addr == PCI_BAR_UNMAPPED)
1286 continue;
1287 memory_region_del_subregion(r->address_space, r->memory);
1288 }
1289
1290 pci_unregister_vga(pci_dev);
1291 }
1292
static void pci_qdev_unrealize(DeviceState *dev)
1294 {
1295 PCIDevice *pci_dev = PCI_DEVICE(dev);
1296 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1297
1298 pci_unregister_io_regions(pci_dev);
1299 pci_del_option_rom(pci_dev);
1300
1301 if (pc->exit) {
1302 pc->exit(pci_dev);
1303 }
1304
1305 pci_device_deassert_intx(pci_dev);
1306 do_pci_unregister_device(pci_dev);
1307
1308 pci_dev->msi_trigger = NULL;
1309
1310 /*
     * clean up acpi-index so it can be reused by another device
1312 */
1313 if (pci_dev->acpi_index) {
1314 GSequence *used_indexes = pci_acpi_index_list();
1315
1316 g_sequence_remove(g_sequence_lookup(used_indexes,
1317 GINT_TO_POINTER(pci_dev->acpi_index),
1318 g_cmp_uint32, NULL));
1319 }
1320 }
1321
void pci_register_bar(PCIDevice *pci_dev, int region_num,
1323 uint8_t type, MemoryRegion *memory)
1324 {
1325 PCIIORegion *r;
1326 uint32_t addr; /* offset in pci config space */
1327 uint64_t wmask;
1328 pcibus_t size = memory_region_size(memory);
1329 uint8_t hdr_type;
1330
1331 assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
1332 assert(region_num >= 0);
1333 assert(region_num < PCI_NUM_REGIONS);
1334 assert(is_power_of_2(size));
1335
1336 /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
1337 hdr_type =
1338 pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
1339 assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);
1340
1341 r = &pci_dev->io_regions[region_num];
1342 r->addr = PCI_BAR_UNMAPPED;
1343 r->size = size;
1344 r->type = type;
1345 r->memory = memory;
1346 r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
1347 ? pci_get_bus(pci_dev)->address_space_io
1348 : pci_get_bus(pci_dev)->address_space_mem;
1349
1350 wmask = ~(size - 1);
1351 if (region_num == PCI_ROM_SLOT) {
1352 /* ROM enable bit is writable */
1353 wmask |= PCI_ROM_ADDRESS_ENABLE;
1354 }
1355
1356 addr = pci_bar(pci_dev, region_num);
1357 pci_set_long(pci_dev->config + addr, type);
1358
1359 if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
1360 r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1361 pci_set_quad(pci_dev->wmask + addr, wmask);
1362 pci_set_quad(pci_dev->cmask + addr, ~0ULL);
1363 } else {
1364 pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
1365 pci_set_long(pci_dev->cmask + addr, 0xffffffff);
1366 }
1367 }
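
/*
 * Typical usage from a device model (illustrative; "s", "ops" and the region
 * name are placeholders):
 *
 *   memory_region_init_io(&s->mmio, OBJECT(s), &ops, s, "example-mmio", 4096);
 *   pci_register_bar(PCI_DEVICE(s), 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
 *                    &s->mmio);
 *
 * The resulting wmask is ~(4096 - 1) = 0xfffff000, so the guest can size the
 * BAR by writing all-ones and reading it back.
 */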
1368
static void pci_update_vga(PCIDevice *pci_dev)
1370 {
1371 uint16_t cmd;
1372
1373 if (!pci_dev->has_vga) {
1374 return;
1375 }
1376
1377 cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
1378
1379 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
1380 cmd & PCI_COMMAND_MEMORY);
1381 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
1382 cmd & PCI_COMMAND_IO);
1383 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
1384 cmd & PCI_COMMAND_IO);
1385 }
1386
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
1388 MemoryRegion *io_lo, MemoryRegion *io_hi)
1389 {
1390 PCIBus *bus = pci_get_bus(pci_dev);
1391
1392 assert(!pci_dev->has_vga);
1393
1394 assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
1395 pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
1396 memory_region_add_subregion_overlap(bus->address_space_mem,
1397 QEMU_PCI_VGA_MEM_BASE, mem, 1);
1398
1399 assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
1400 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
1401 memory_region_add_subregion_overlap(bus->address_space_io,
1402 QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);
1403
1404 assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
1405 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
1406 memory_region_add_subregion_overlap(bus->address_space_io,
1407 QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
1408 pci_dev->has_vga = true;
1409
1410 pci_update_vga(pci_dev);
1411 }
1412
void pci_unregister_vga(PCIDevice *pci_dev)
1414 {
1415 PCIBus *bus = pci_get_bus(pci_dev);
1416
1417 if (!pci_dev->has_vga) {
1418 return;
1419 }
1420
1421 memory_region_del_subregion(bus->address_space_mem,
1422 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
1423 memory_region_del_subregion(bus->address_space_io,
1424 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
1425 memory_region_del_subregion(bus->address_space_io,
1426 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
1427 pci_dev->has_vga = false;
1428 }
1429
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
1431 {
1432 return pci_dev->io_regions[region_num].addr;
1433 }
1434
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
1436 uint8_t type, pcibus_t size)
1437 {
1438 pcibus_t new_addr;
1439 if (!pci_is_vf(d)) {
1440 int bar = pci_bar(d, reg);
1441 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1442 new_addr = pci_get_quad(d->config + bar);
1443 } else {
1444 new_addr = pci_get_long(d->config + bar);
1445 }
1446 } else {
1447 PCIDevice *pf = d->exp.sriov_vf.pf;
1448 uint16_t sriov_cap = pf->exp.sriov_cap;
1449 int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
1450 uint16_t vf_offset =
1451 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
1452 uint16_t vf_stride =
1453 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
1454 uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;
1455
1456 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1457 new_addr = pci_get_quad(pf->config + bar);
1458 } else {
1459 new_addr = pci_get_long(pf->config + bar);
1460 }
1461 new_addr += vf_num * size;
1462 }
1463 /* The ROM slot has a specific enable bit, keep it intact */
1464 if (reg != PCI_ROM_SLOT) {
1465 new_addr &= ~(size - 1);
1466 }
1467 return new_addr;
1468 }
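
/*
 * Illustrative SR-IOV example: with vf_offset 1, vf_stride 1 and a PF at
 * devfn 0x08, the VF at devfn 0x0b gets vf_num = (0x0b - 0x09) / 1 = 2, so
 * its BAR address is the PF's corresponding SR-IOV BAR base plus 2 * size.
 */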
1469
pcibus_t pci_bar_address(PCIDevice *d,
1471 int reg, uint8_t type, pcibus_t size)
1472 {
1473 pcibus_t new_addr, last_addr;
1474 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
1475 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1476 bool allow_0_address = mc->pci_allow_0_address;
1477
1478 if (type & PCI_BASE_ADDRESS_SPACE_IO) {
1479 if (!(cmd & PCI_COMMAND_IO)) {
1480 return PCI_BAR_UNMAPPED;
1481 }
1482 new_addr = pci_config_get_bar_addr(d, reg, type, size);
1483 last_addr = new_addr + size - 1;
1484 /* Check if 32 bit BAR wraps around explicitly.
     * TODO: make priorities correct and remove this workaround.
1486 */
1487 if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
1488 (!allow_0_address && new_addr == 0)) {
1489 return PCI_BAR_UNMAPPED;
1490 }
1491 return new_addr;
1492 }
1493
1494 if (!(cmd & PCI_COMMAND_MEMORY)) {
1495 return PCI_BAR_UNMAPPED;
1496 }
1497 new_addr = pci_config_get_bar_addr(d, reg, type, size);
1498 /* the ROM slot has a specific enable bit */
1499 if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
1500 return PCI_BAR_UNMAPPED;
1501 }
1502 new_addr &= ~(size - 1);
1503 last_addr = new_addr + size - 1;
1504 /* NOTE: we do not support wrapping */
1505 /* XXX: as we cannot support really dynamic
1506 mappings, we handle specific values as invalid
1507 mappings. */
1508 if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
1509 (!allow_0_address && new_addr == 0)) {
1510 return PCI_BAR_UNMAPPED;
1511 }
1512
    /* pcibus_t is now 64 bit.
     * Explicitly check whether a 32-bit BAR wraps around.
     * Without this, PC IDE doesn't work well.
     * TODO: remove this workaround.
1517 */
1518 if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
1519 return PCI_BAR_UNMAPPED;
1520 }
1521
1522 /*
     * The OS is allowed to set a BAR beyond its addressable
     * bits; for example, a 32-bit OS can set a 64-bit BAR
     * to >4G. Check for that. TODO: we might need to support
     * this in the future, e.g. for PAE.
1527 */
1528 if (last_addr >= HWADDR_MAX) {
1529 return PCI_BAR_UNMAPPED;
1530 }
1531
1532 return new_addr;
1533 }
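
/*
 * Illustrative example: a 64-bit memory BAR programmed to 0x100000000
 * while PCI_COMMAND_MEMORY is still clear yields PCI_BAR_UNMAPPED above; the
 * region is only mapped once the guest enables memory decoding.
 */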
1534
static void pci_update_mappings(PCIDevice *d)
1536 {
1537 PCIIORegion *r;
1538 int i;
1539 pcibus_t new_addr;
1540
1541 for(i = 0; i < PCI_NUM_REGIONS; i++) {
1542 r = &d->io_regions[i];
1543
1544 /* this region isn't registered */
1545 if (!r->size)
1546 continue;
1547
1548 new_addr = pci_bar_address(d, i, r->type, r->size);
1549 if (!d->has_power) {
1550 new_addr = PCI_BAR_UNMAPPED;
1551 }
1552
1553 /* This bar isn't changed */
1554 if (new_addr == r->addr)
1555 continue;
1556
1557 /* now do the real mapping */
1558 if (r->addr != PCI_BAR_UNMAPPED) {
1559 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
1560 PCI_SLOT(d->devfn),
1561 PCI_FUNC(d->devfn),
1562 i, r->addr, r->size);
1563 memory_region_del_subregion(r->address_space, r->memory);
1564 }
1565 r->addr = new_addr;
1566 if (r->addr != PCI_BAR_UNMAPPED) {
1567 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
1568 PCI_SLOT(d->devfn),
1569 PCI_FUNC(d->devfn),
1570 i, r->addr, r->size);
1571 memory_region_add_subregion_overlap(r->address_space,
1572 r->addr, r->memory, 1);
1573 }
1574 }
1575
1576 pci_update_vga(d);
1577 }
1578
static inline int pci_irq_disabled(PCIDevice *d)
1580 {
1581 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
1582 }
1583
1584 /* Called after interrupt disabled field update in config space,
1585 * assert/deassert interrupts if necessary.
1586 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
1588 {
1589 int i, disabled = pci_irq_disabled(d);
1590 if (disabled == was_irq_disabled)
1591 return;
1592 for (i = 0; i < PCI_NUM_PINS; ++i) {
1593 int state = pci_irq_state(d, i);
1594 pci_change_irq_level(d, i, disabled ? -state : state);
1595 }
1596 }
1597
uint32_t pci_default_read_config(PCIDevice *d,
1599 uint32_t address, int len)
1600 {
1601 uint32_t val = 0;
1602
1603 assert(address + len <= pci_config_size(d));
1604
1605 if (pci_is_express_downstream_port(d) &&
1606 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
1607 pcie_sync_bridge_lnk(d);
1608 }
1609 memcpy(&val, d->config + address, len);
1610 return le32_to_cpu(val);
1611 }
1612
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
1614 {
1615 int i, was_irq_disabled = pci_irq_disabled(d);
1616 uint32_t val = val_in;
1617
1618 assert(addr + l <= pci_config_size(d));
1619
1620 for (i = 0; i < l; val >>= 8, ++i) {
1621 uint8_t wmask = d->wmask[addr + i];
1622 uint8_t w1cmask = d->w1cmask[addr + i];
1623 assert(!(wmask & w1cmask));
1624 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
1625 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
1626 }
1627 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
1628 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
1629 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
1630 range_covers_byte(addr, l, PCI_COMMAND))
1631 pci_update_mappings(d);
1632
1633 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
1634 pci_update_irq_disabled(d, was_irq_disabled);
1635 memory_region_set_enabled(&d->bus_master_enable_region,
1636 (pci_get_word(d->config + PCI_COMMAND)
1637 & PCI_COMMAND_MASTER) && d->has_power);
1638 }
1639
1640 msi_write_config(d, addr, val_in, l);
1641 msix_write_config(d, addr, val_in, l);
1642 pcie_sriov_config_write(d, addr, val_in, l);
1643 }
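
/*
 * Illustrative example of the masking above: for a config byte with
 * wmask == 0x0f and w1cmask == 0x40, writing 0x5f stores 0x5 in the low
 * nibble, leaves bits 4, 5 and 7 untouched, and clears bit 6 because it is
 * write-1-to-clear.
 */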
1644
1645 /***********************************************************/
1646 /* generic PCI irq support */
1647
1648 /* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
1650 {
1651 PCIDevice *pci_dev = opaque;
1652 int change;
1653
1654 assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
1655 assert(level == 0 || level == 1);
1656 change = level - pci_irq_state(pci_dev, irq_num);
1657 if (!change)
1658 return;
1659
1660 pci_set_irq_state(pci_dev, irq_num, level);
1661 pci_update_irq_status(pci_dev);
1662 if (pci_irq_disabled(pci_dev))
1663 return;
1664 pci_change_irq_level(pci_dev, irq_num, change);
1665 }
1666
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
1668 {
1669 int intx = pci_intx(pci_dev);
1670 assert(0 <= intx && intx < PCI_NUM_PINS);
1671
1672 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
1673 }
1674
void pci_set_irq(PCIDevice *pci_dev, int level)
1676 {
1677 int intx = pci_intx(pci_dev);
1678 pci_irq_handler(pci_dev, intx, level);
1679 }
1680
1681 /* Special hooks used by device assignment */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
1683 {
1684 assert(pci_bus_is_root(bus));
1685 bus->route_intx_to_irq = route_intx_to_irq;
1686 }
1687
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
1689 {
1690 PCIBus *bus;
1691
1692 do {
1693 int dev_irq = pin;
1694 bus = pci_get_bus(dev);
1695 pin = bus->map_irq(dev, pin);
1696 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
1697 pci_bus_is_root(bus) ? "root-complex"
1698 : DEVICE(bus->parent_dev)->canonical_path);
1699 dev = bus->parent_dev;
1700 } while (dev);
1701
1702 if (!bus->route_intx_to_irq) {
1703 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
1704 object_get_typename(OBJECT(bus->qbus.parent)));
1705 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
1706 }
1707
1708 return bus->route_intx_to_irq(bus->irq_opaque, pin);
1709 }
1710
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
1712 {
1713 return old->mode != new->mode || old->irq != new->irq;
1714 }
1715
1716 void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
1717 {
1718 PCIDevice *dev;
1719 PCIBus *sec;
1720 int i;
1721
1722 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1723 dev = bus->devices[i];
1724 if (dev && dev->intx_routing_notifier) {
1725 dev->intx_routing_notifier(dev);
1726 }
1727 }
1728
1729 QLIST_FOREACH(sec, &bus->child, sibling) {
1730 pci_bus_fire_intx_routing_notifier(sec);
1731 }
1732 }
1733
1734 void pci_device_set_intx_routing_notifier(PCIDevice *dev,
1735 PCIINTxRoutingNotifier notifier)
1736 {
1737 dev->intx_routing_notifier = notifier;
1738 }
1739
1740 /*
1741 * PCI-to-PCI bridge specification
1742 * 9.1: Interrupt routing. Table 9-1
1743 *
1744 * the PCI Express Base Specification, Revision 2.1
1745 * 2.2.8.1: INTx interrupt signaling - Rules
1746 * the Implementation Note
1747 * Table 2-20
1748 */
1749 /*
1750 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
1751 * 0-origin unlike PCI interrupt pin register.
1752 */
1753 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
1754 {
1755 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
1756 }
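/*
 * Usage sketch (illustrative only): a host bridge commonly passes
 * pci_swizzle_map_irq_fn (or a wrapper around it) as the map_irq callback
 * when registering its root bus, so INTx lines rotate with the slot number
 * in the standard way. my_host_set_irq and MY_HOST_NIRQ are hypothetical;
 * the pci_register_root_bus() arguments are sketched from memory.
 *
 *     bus = pci_register_root_bus(DEVICE(host), "pci.0",
 *                                 my_host_set_irq, pci_swizzle_map_irq_fn,
 *                                 host, address_space_mem, address_space_io,
 *                                 PCI_DEVFN(0, 0), MY_HOST_NIRQ,
 *                                 TYPE_PCI_BUS);
 */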
1757
1758 /***********************************************************/
1759 /* monitor info on PCI */
1760
1761 static const pci_class_desc pci_class_descriptions[] =
1762 {
1763 { 0x0001, "VGA controller", "display"},
1764 { 0x0100, "SCSI controller", "scsi"},
1765 { 0x0101, "IDE controller", "ide"},
1766 { 0x0102, "Floppy controller", "fdc"},
1767 { 0x0103, "IPI controller", "ipi"},
1768 { 0x0104, "RAID controller", "raid"},
1769 { 0x0106, "SATA controller"},
1770 { 0x0107, "SAS controller"},
1771 { 0x0180, "Storage controller"},
1772 { 0x0200, "Ethernet controller", "ethernet"},
1773 { 0x0201, "Token Ring controller", "token-ring"},
1774 { 0x0202, "FDDI controller", "fddi"},
1775 { 0x0203, "ATM controller", "atm"},
1776 { 0x0280, "Network controller"},
1777 { 0x0300, "VGA controller", "display", 0x00ff},
1778 { 0x0301, "XGA controller"},
1779 { 0x0302, "3D controller"},
1780 { 0x0380, "Display controller"},
1781 { 0x0400, "Video controller", "video"},
1782 { 0x0401, "Audio controller", "sound"},
1783 { 0x0402, "Phone"},
1784 { 0x0403, "Audio controller", "sound"},
1785 { 0x0480, "Multimedia controller"},
1786 { 0x0500, "RAM controller", "memory"},
1787 { 0x0501, "Flash controller", "flash"},
1788 { 0x0580, "Memory controller"},
1789 { 0x0600, "Host bridge", "host"},
1790 { 0x0601, "ISA bridge", "isa"},
1791 { 0x0602, "EISA bridge", "eisa"},
1792 { 0x0603, "MC bridge", "mca"},
1793 { 0x0604, "PCI bridge", "pci-bridge"},
1794 { 0x0605, "PCMCIA bridge", "pcmcia"},
1795 { 0x0606, "NUBUS bridge", "nubus"},
1796 { 0x0607, "CARDBUS bridge", "cardbus"},
1797 { 0x0608, "RACEWAY bridge"},
1798 { 0x0680, "Bridge"},
1799 { 0x0700, "Serial port", "serial"},
1800 { 0x0701, "Parallel port", "parallel"},
1801 { 0x0800, "Interrupt controller", "interrupt-controller"},
1802 { 0x0801, "DMA controller", "dma-controller"},
1803 { 0x0802, "Timer", "timer"},
1804 { 0x0803, "RTC", "rtc"},
1805 { 0x0900, "Keyboard", "keyboard"},
1806 { 0x0901, "Pen", "pen"},
1807 { 0x0902, "Mouse", "mouse"},
1808 { 0x0A00, "Dock station", "dock", 0x00ff},
1809 { 0x0B00, "i386 cpu", "cpu", 0x00ff},
1810 { 0x0c00, "Firewire controller", "firewire"},
1811 { 0x0c01, "Access bus controller", "access-bus"},
1812 { 0x0c02, "SSA controller", "ssa"},
1813 { 0x0c03, "USB controller", "usb"},
1814 { 0x0c04, "Fibre channel controller", "fibre-channel"},
1815 { 0x0c05, "SMBus"},
1816 { 0, NULL}
1817 };
1818
1819 void pci_for_each_device_under_bus_reverse(PCIBus *bus,
1820 pci_bus_dev_fn fn,
1821 void *opaque)
1822 {
1823 PCIDevice *d;
1824 int devfn;
1825
1826 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1827 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
1828 if (d) {
1829 fn(bus, d, opaque);
1830 }
1831 }
1832 }
1833
1834 void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
1835 pci_bus_dev_fn fn, void *opaque)
1836 {
1837 bus = pci_find_bus_nr(bus, bus_num);
1838
1839 if (bus) {
1840 pci_for_each_device_under_bus_reverse(bus, fn, opaque);
1841 }
1842 }
1843
1844 void pci_for_each_device_under_bus(PCIBus *bus,
1845 pci_bus_dev_fn fn, void *opaque)
1846 {
1847 PCIDevice *d;
1848 int devfn;
1849
1850 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1851 d = bus->devices[devfn];
1852 if (d) {
1853 fn(bus, d, opaque);
1854 }
1855 }
1856 }
1857
1858 void pci_for_each_device(PCIBus *bus, int bus_num,
1859 pci_bus_dev_fn fn, void *opaque)
1860 {
1861 bus = pci_find_bus_nr(bus, bus_num);
1862
1863 if (bus) {
1864 pci_for_each_device_under_bus(bus, fn, opaque);
1865 }
1866 }
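/*
 * Usage sketch (illustrative only): counting the functions present on a
 * given bus number with the iterator above; count_dev() is a hypothetical
 * callback.
 *
 *     static void count_dev(PCIBus *b, PCIDevice *d, void *opaque)
 *     {
 *         (*(int *)opaque)++;
 *     }
 *
 *     int count = 0;
 *     pci_for_each_device(root_bus, 0, count_dev, &count);
 */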
1867
1868 const pci_class_desc *get_class_desc(int class)
1869 {
1870 const pci_class_desc *desc;
1871
1872 desc = pci_class_descriptions;
1873 while (desc->desc && class != desc->class) {
1874 desc++;
1875 }
1876
1877 return desc;
1878 }
1879
1880 void pci_init_nic_devices(PCIBus *bus, const char *default_model)
1881 {
1882 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
1883 "virtio", "virtio-net-pci");
1884 }
1885
1886 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
1887 const char *alias, const char *devaddr)
1888 {
1889 NICInfo *nd = qemu_find_nic_info(model, true, alias);
1890 int dom, busnr, devfn;
1891 PCIDevice *pci_dev;
1892 unsigned slot;
1893 PCIBus *bus;
1894
1895 if (!nd) {
1896 return false;
1897 }
1898
1899 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
1900 error_report("Invalid PCI device address %s for device %s",
1901 devaddr, model);
1902 exit(1);
1903 }
1904
1905 if (dom != 0) {
1906 error_report("No support for non-zero PCI domains");
1907 exit(1);
1908 }
1909
1910 devfn = PCI_DEVFN(slot, 0);
1911
1912 bus = pci_find_bus_nr(rootbus, busnr);
1913 if (!bus) {
1914 error_report("Invalid PCI device address %s for device %s",
1915 devaddr, model);
1916 exit(1);
1917 }
1918
1919 pci_dev = pci_new(devfn, model);
1920 qdev_set_nic_properties(&pci_dev->qdev, nd);
1921 pci_realize_and_unref(pci_dev, bus, &error_fatal);
1922 return true;
1923 }
1924
1925 PCIDevice *pci_vga_init(PCIBus *bus)
1926 {
1927 vga_interface_created = true;
1928 switch (vga_interface_type) {
1929 case VGA_CIRRUS:
1930 return pci_create_simple(bus, -1, "cirrus-vga");
1931 case VGA_QXL:
1932 return pci_create_simple(bus, -1, "qxl-vga");
1933 case VGA_STD:
1934 return pci_create_simple(bus, -1, "VGA");
1935 case VGA_VMWARE:
1936 return pci_create_simple(bus, -1, "vmware-svga");
1937 case VGA_VIRTIO:
1938 return pci_create_simple(bus, -1, "virtio-vga");
1939 case VGA_NONE:
1940 default: /* Other non-PCI types. Checking for unsupported types is already
1941 done in vl.c. */
1942 return NULL;
1943 }
1944 }
1945
1946 /* Whether a given bus number is in range of the secondary
1947 * bus of the given bridge device. */
1948 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
1949 {
1950 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
1951 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
1952 dev->config[PCI_SECONDARY_BUS] <= bus_num &&
1953 bus_num <= dev->config[PCI_SUBORDINATE_BUS];
1954 }
1955
1956 /* Whether a given bus number is within the bus range of a root bus */
1957 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
1958 {
1959 int i;
1960
1961 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1962 PCIDevice *dev = bus->devices[i];
1963
1964 if (dev && IS_PCI_BRIDGE(dev)) {
1965 if (pci_secondary_bus_in_range(dev, bus_num)) {
1966 return true;
1967 }
1968 }
1969 }
1970
1971 return false;
1972 }
1973
1974 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
1975 {
1976 PCIBus *sec;
1977
1978 if (!bus) {
1979 return NULL;
1980 }
1981
1982 if (pci_bus_num(bus) == bus_num) {
1983 return bus;
1984 }
1985
1986 /* Consider all bus numbers in range for the host pci bridge. */
1987 if (!pci_bus_is_root(bus) &&
1988 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
1989 return NULL;
1990 }
1991
1992 /* try child bus */
1993 for (; bus; bus = sec) {
1994 QLIST_FOREACH(sec, &bus->child, sibling) {
1995 if (pci_bus_num(sec) == bus_num) {
1996 return sec;
1997 }
1998 /* PXB buses assumed to be children of bus 0 */
1999 if (pci_bus_is_root(sec)) {
2000 if (pci_root_bus_in_range(sec, bus_num)) {
2001 break;
2002 }
2003 } else {
2004 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
2005 break;
2006 }
2007 }
2008 }
2009 }
2010
2011 return NULL;
2012 }
2013
2014 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
2015 pci_bus_fn end, void *parent_state)
2016 {
2017 PCIBus *sec;
2018 void *state;
2019
2020 if (!bus) {
2021 return;
2022 }
2023
2024 if (begin) {
2025 state = begin(bus, parent_state);
2026 } else {
2027 state = parent_state;
2028 }
2029
2030 QLIST_FOREACH(sec, &bus->child, sibling) {
2031 pci_for_each_bus_depth_first(sec, begin, end, state);
2032 }
2033
2034 if (end) {
2035 end(bus, state);
2036 }
2037 }
2038
2039
2040 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
2041 {
2042 bus = pci_find_bus_nr(bus, bus_num);
2043
2044 if (!bus)
2045 return NULL;
2046
2047 return bus->devices[devfn];
2048 }
2049
2050 #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
2051
2052 static void pci_qdev_realize(DeviceState *qdev, Error **errp)
2053 {
2054 PCIDevice *pci_dev = (PCIDevice *)qdev;
2055 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
2056 ObjectClass *klass = OBJECT_CLASS(pc);
2057 Error *local_err = NULL;
2058 bool is_default_rom;
2059 uint16_t class_id;
2060
2061 /*
2062 * The limit is capped by systemd (see udev-builtin-net_id.c); as it is the
2063 * only known consumer, honor it to avoid users misconfiguring QEMU and
2064 * then wondering why acpi-index doesn't work.
2065 */
2066 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
2067 error_setg(errp, "acpi-index should be less or equal to %u",
2068 ONBOARD_INDEX_MAX);
2069 return;
2070 }
2071
2072 /*
2073 * make sure that acpi-index is unique across all present PCI devices
2074 */
2075 if (pci_dev->acpi_index) {
2076 GSequence *used_indexes = pci_acpi_index_list();
2077
2078 if (g_sequence_lookup(used_indexes,
2079 GINT_TO_POINTER(pci_dev->acpi_index),
2080 g_cmp_uint32, NULL)) {
2081 error_setg(errp, "a PCI device with acpi-index = %" PRIu32
2082 " already exist", pci_dev->acpi_index);
2083 return;
2084 }
2085 g_sequence_insert_sorted(used_indexes,
2086 GINT_TO_POINTER(pci_dev->acpi_index),
2087 g_cmp_uint32, NULL);
2088 }
2089
2090 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
2091 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
2092 return;
2093 }
2094
2095 /* Initialize cap_present for pci_is_express() and pci_config_size().
2096 * Note that hybrid PCI/PCIe devices are not flagged automatically and
2097 * need to manage QEMU_PCI_CAP_EXPRESS themselves. */
2098 if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
2099 !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
2100 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
2101 }
2102
2103 if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
2104 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
2105 }
2106
2107 pci_dev = do_pci_register_device(pci_dev,
2108 object_get_typename(OBJECT(qdev)),
2109 pci_dev->devfn, errp);
2110 if (pci_dev == NULL)
2111 return;
2112
2113 if (pc->realize) {
2114 pc->realize(pci_dev, &local_err);
2115 if (local_err) {
2116 error_propagate(errp, local_err);
2117 do_pci_unregister_device(pci_dev);
2118 return;
2119 }
2120 }
2121
2122 /*
2123 * A PCIe Downstream Port that does not have ARI Forwarding enabled must
2124 * associate only Device 0 with the device attached to the bus
2125 * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
2126 * sec 7.3.1).
2127 * With ARI, PCI_SLOT() can return non-zero value as the traditional
2128 * 5-bit Device Number and 3-bit Function Number fields in its associated
2129 * Routing IDs, Requester IDs and Completer IDs are interpreted as a
2130 * single 8-bit Function Number. Hence, ignore ARI capable devices.
2131 */
2132 if (pci_is_express(pci_dev) &&
2133 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
2134 pcie_has_upstream_port(pci_dev) &&
2135 PCI_SLOT(pci_dev->devfn)) {
2136 warn_report("PCI: slot %d is not valid for %s,"
2137 " parent device only allows plugging into slot 0.",
2138 PCI_SLOT(pci_dev->devfn), pci_dev->name);
2139 }
2140
2141 if (pci_dev->failover_pair_id) {
2142 if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
2143 error_setg(errp, "failover primary device must be on "
2144 "PCIExpress bus");
2145 pci_qdev_unrealize(DEVICE(pci_dev));
2146 return;
2147 }
2148 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
2149 if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
2150 error_setg(errp, "failover primary device is not an "
2151 "Ethernet device");
2152 pci_qdev_unrealize(DEVICE(pci_dev));
2153 return;
2154 }
2155 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
2156 || (PCI_FUNC(pci_dev->devfn) != 0)) {
2157 error_setg(errp, "failover: primary device must be in its own "
2158 "PCI slot");
2159 pci_qdev_unrealize(DEVICE(pci_dev));
2160 return;
2161 }
2162 qdev->allow_unplug_during_migration = true;
2163 }
2164
2165 /* rom loading */
2166 is_default_rom = false;
2167 if (pci_dev->romfile == NULL && pc->romfile != NULL) {
2168 pci_dev->romfile = g_strdup(pc->romfile);
2169 is_default_rom = true;
2170 }
2171
2172 pci_add_option_rom(pci_dev, is_default_rom, &local_err);
2173 if (local_err) {
2174 error_propagate(errp, local_err);
2175 pci_qdev_unrealize(DEVICE(pci_dev));
2176 return;
2177 }
2178
2179 pci_set_power(pci_dev, true);
2180
2181 pci_dev->msi_trigger = pci_msi_trigger;
2182 }
2183
2184 static PCIDevice *pci_new_internal(int devfn, bool multifunction,
2185 const char *name)
2186 {
2187 DeviceState *dev;
2188
2189 dev = qdev_new(name);
2190 qdev_prop_set_int32(dev, "addr", devfn);
2191 qdev_prop_set_bit(dev, "multifunction", multifunction);
2192 return PCI_DEVICE(dev);
2193 }
2194
2195 PCIDevice *pci_new_multifunction(int devfn, const char *name)
2196 {
2197 return pci_new_internal(devfn, true, name);
2198 }
2199
2200 PCIDevice *pci_new(int devfn, const char *name)
2201 {
2202 return pci_new_internal(devfn, false, name);
2203 }
2204
2205 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
2206 {
2207 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
2208 }
2209
2210 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
2211 const char *name)
2212 {
2213 PCIDevice *dev = pci_new_multifunction(devfn, name);
2214 pci_realize_and_unref(dev, bus, &error_fatal);
2215 return dev;
2216 }
2217
2218 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
2219 {
2220 PCIDevice *dev = pci_new(devfn, name);
2221 pci_realize_and_unref(dev, bus, &error_fatal);
2222 return dev;
2223 }
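/*
 * Usage sketch (illustrative only): board code typically instantiates
 * onboard functions with the helpers above, either at a fixed devfn or with
 * -1 to take the first free slot. Failures are fatal because
 * pci_realize_and_unref() is called with &error_fatal.
 *
 *     pci_create_simple(pci_bus, PCI_DEVFN(3, 0), "e1000");
 *     pci_create_simple(pci_bus, -1, "VGA");
 */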
2224
2225 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
2226 {
2227 int offset = PCI_CONFIG_HEADER_SIZE;
2228 int i;
2229 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
2230 if (pdev->used[i])
2231 offset = i + 1;
2232 else if (i - offset + 1 == size)
2233 return offset;
2234 }
2235 return 0;
2236 }
2237
2238 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
2239 uint8_t *prev_p)
2240 {
2241 uint8_t next, prev;
2242
2243 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
2244 return 0;
2245
2246 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2247 prev = next + PCI_CAP_LIST_NEXT)
2248 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
2249 break;
2250
2251 if (prev_p)
2252 *prev_p = prev;
2253 return next;
2254 }
2255
2256 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
2257 {
2258 uint8_t next, prev, found = 0;
2259
2260 if (!(pdev->used[offset])) {
2261 return 0;
2262 }
2263
2264 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);
2265
2266 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2267 prev = next + PCI_CAP_LIST_NEXT) {
2268 if (next <= offset && next > found) {
2269 found = next;
2270 }
2271 }
2272 return found;
2273 }
2274
2275 /* Patch the PCI vendor and device ids in a PCI rom image if necessary.
2276 This is needed for an option rom which is used for more than one device. */
2277 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
2278 {
2279 uint16_t vendor_id;
2280 uint16_t device_id;
2281 uint16_t rom_vendor_id;
2282 uint16_t rom_device_id;
2283 uint16_t rom_magic;
2284 uint16_t pcir_offset;
2285 uint8_t checksum;
2286
2287 /* Words in rom data are little endian (like in PCI configuration),
2288 so they can be read / written with pci_get_word / pci_set_word. */
2289
2290 /* Only a valid rom will be patched. */
2291 rom_magic = pci_get_word(ptr);
2292 if (rom_magic != 0xaa55) {
2293 PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
2294 return;
2295 }
2296 pcir_offset = pci_get_word(ptr + 0x18);
2297 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
2298 PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
2299 return;
2300 }
2301
2302 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2303 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2304 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
2305 rom_device_id = pci_get_word(ptr + pcir_offset + 6);
2306
2307 PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
2308 vendor_id, device_id, rom_vendor_id, rom_device_id);
2309
2310 checksum = ptr[6];
2311
2312 if (vendor_id != rom_vendor_id) {
2313 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
2314 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
2315 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
2316 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
2317 ptr[6] = checksum;
2318 pci_set_word(ptr + pcir_offset + 4, vendor_id);
2319 }
2320
2321 if (device_id != rom_device_id) {
2322 /* Patch device id and checksum (at offset 6 for etherboot roms). */
2323 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
2324 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
2325 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
2326 ptr[6] = checksum;
2327 pci_set_word(ptr + pcir_offset + 6, device_id);
2328 }
2329 }
2330
2331 /* Add an option rom for the device */
2332 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
2333 Error **errp)
2334 {
2335 int64_t size = 0;
2336 g_autofree char *path = NULL;
2337 char name[32];
2338 const VMStateDescription *vmsd;
2339
2340 /*
2341 * In the case of incoming migration the ROM content arrives with the
2342 * migration stream, so there is no reason to load the file. Nor do we
2343 * want to fail if the local ROM file does not match the specified romsize.
2344 */
2345 bool load_file = !runstate_check(RUN_STATE_INMIGRATE);
2346
2347 if (!pdev->romfile || !strlen(pdev->romfile)) {
2348 return;
2349 }
2350
2351 if (!pdev->rom_bar) {
2352 /*
2353 * Load rom via fw_cfg instead of creating a rom bar,
2354 * for 0.11 compatibility.
2355 */
2356 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2357
2358 /*
2359 * Hot-plugged devices can't use the option ROM
2360 * if the rom bar is disabled.
2361 */
2362 if (DEVICE(pdev)->hotplugged) {
2363 error_setg(errp, "Hot-plugged device without ROM bar"
2364 " can't have an option ROM");
2365 return;
2366 }
2367
2368 if (class == 0x0300) {
2369 rom_add_vga(pdev->romfile);
2370 } else {
2371 rom_add_option(pdev->romfile, -1);
2372 }
2373 return;
2374 }
2375
2376 if (load_file || pdev->romsize == UINT32_MAX) {
2377 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
2378 if (path == NULL) {
2379 path = g_strdup(pdev->romfile);
2380 }
2381
2382 size = get_image_size(path);
2383 if (size < 0) {
2384 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
2385 return;
2386 } else if (size == 0) {
2387 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
2388 return;
2389 } else if (size > 2 * GiB) {
2390 error_setg(errp,
2391 "romfile \"%s\" too large (size cannot exceed 2 GiB)",
2392 pdev->romfile);
2393 return;
2394 }
2395 if (pdev->romsize != UINT32_MAX) {
2396 if (size > pdev->romsize) {
2397 error_setg(errp, "romfile \"%s\" (%u bytes) "
2398 "is too large for ROM size %u",
2399 pdev->romfile, (uint32_t)size, pdev->romsize);
2400 return;
2401 }
2402 } else {
2403 pdev->romsize = pow2ceil(size);
2404 }
2405 }
2406
2407 vmsd = qdev_get_vmsd(DEVICE(pdev));
2408 snprintf(name, sizeof(name), "%s.rom",
2409 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));
2410
2411 pdev->has_rom = true;
2412 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
2413 &error_fatal);
2414
2415 if (load_file) {
2416 void *ptr = memory_region_get_ram_ptr(&pdev->rom);
2417
2418 if (load_image_size(path, ptr, size) < 0) {
2419 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
2420 return;
2421 }
2422
2423 if (is_default_rom) {
2424 /* Only the default rom images will be patched (if needed). */
2425 pci_patch_ids(pdev, ptr, size);
2426 }
2427 }
2428
2429 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
2430 }
2431
2432 static void pci_del_option_rom(PCIDevice *pdev)
2433 {
2434 if (!pdev->has_rom)
2435 return;
2436
2437 vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
2438 pdev->has_rom = false;
2439 }
2440
2441 /*
2442 * On success, pci_add_capability() returns a positive value
2443 * that is the offset of the PCI capability.
2444 * On failure, it sets an error and returns a negative error
2445 * code.
2446 */
2447 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
2448 uint8_t offset, uint8_t size,
2449 Error **errp)
2450 {
2451 uint8_t *config;
2452 int i, overlapping_cap;
2453
2454 if (!offset) {
2455 offset = pci_find_space(pdev, size);
2456 /* running out of PCI config space is a programming error */
2457 assert(offset);
2458 } else {
2459 /* Verify that capabilities don't overlap. Note: device assignment
2460 * depends on this check to verify that the device is not broken.
2461 * Should never trigger for emulated devices, but it's helpful
2462 * for debugging these. */
2463 for (i = offset; i < offset + size; i++) {
2464 overlapping_cap = pci_find_capability_at_offset(pdev, i);
2465 if (overlapping_cap) {
2466 error_setg(errp, "%s:%02x:%02x.%x "
2467 "Attempt to add PCI capability %x at offset "
2468 "%x overlaps existing capability %x at offset %x",
2469 pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
2470 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2471 cap_id, offset, overlapping_cap, i);
2472 return -EINVAL;
2473 }
2474 }
2475 }
2476
2477 config = pdev->config + offset;
2478 config[PCI_CAP_LIST_ID] = cap_id;
2479 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
2480 pdev->config[PCI_CAPABILITY_LIST] = offset;
2481 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2482 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
2483 /* Make capability read-only by default */
2484 memset(pdev->wmask + offset, 0, size);
2485 /* Check capability by default */
2486 memset(pdev->cmask + offset, 0xFF, size);
2487 return offset;
2488 }
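/*
 * Usage sketch (illustrative only): a device realize function might add a
 * vendor-specific capability and then open part of its body for guest
 * writes. The 0x40 offset and 0x10 size are arbitrary example values.
 *
 *     int cap = pci_add_capability(pdev, PCI_CAP_ID_VNDR, 0x40, 0x10, errp);
 *     if (cap < 0) {
 *         return;
 *     }
 *     // pci_add_capability() leaves the body read-only; re-enable writes
 *     // for the bytes after the 3-byte vendor capability header.
 *     memset(pdev->wmask + cap + 3, 0xff, 0x10 - 3);
 */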
2489
2490 /* Unlink capability from the pci config space. */
2491 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
2492 {
2493 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
2494 if (!offset)
2495 return;
2496 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
2497 /* Make capability writable again */
2498 memset(pdev->wmask + offset, 0xff, size);
2499 memset(pdev->w1cmask + offset, 0, size);
2500 /* Clear cmask as device-specific registers can't be checked */
2501 memset(pdev->cmask + offset, 0, size);
2502 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));
2503
2504 if (!pdev->config[PCI_CAPABILITY_LIST])
2505 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
2506 }
2507
2508 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
2509 {
2510 return pci_find_capability_list(pdev, cap_id, NULL);
2511 }
2512
2513 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
2514 {
2515 PCIDevice *d = (PCIDevice *)dev;
2516 const char *name = NULL;
2517 const pci_class_desc *desc = pci_class_descriptions;
2518 int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
2519
2520 while (desc->desc &&
2521 (class & ~desc->fw_ign_bits) !=
2522 (desc->class & ~desc->fw_ign_bits)) {
2523 desc++;
2524 }
2525
2526 if (desc->desc) {
2527 name = desc->fw_name;
2528 }
2529
2530 if (name) {
2531 pstrcpy(buf, len, name);
2532 } else {
2533 snprintf(buf, len, "pci%04x,%04x",
2534 pci_get_word(d->config + PCI_VENDOR_ID),
2535 pci_get_word(d->config + PCI_DEVICE_ID));
2536 }
2537
2538 return buf;
2539 }
2540
2541 static char *pcibus_get_fw_dev_path(DeviceState *dev)
2542 {
2543 PCIDevice *d = (PCIDevice *)dev;
2544 char name[33];
2545 int has_func = !!PCI_FUNC(d->devfn);
2546
2547 return g_strdup_printf("%s@%x%s%.*x",
2548 pci_dev_fw_name(dev, name, sizeof(name)),
2549 PCI_SLOT(d->devfn),
2550 has_func ? "," : "",
2551 has_func,
2552 PCI_FUNC(d->devfn));
2553 }
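/*
 * Illustrative examples of the resulting firmware path component
 * (hypothetical values): an Ethernet function at slot 3, function 0 is
 * rendered as "ethernet@3"; a device of an unlisted class with vendor/device
 * ID 8086:100e at slot 4, function 1 as "pci8086,100e@4,1".
 */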
2554
2555 static char *pcibus_get_dev_path(DeviceState *dev)
2556 {
2557 PCIDevice *d = container_of(dev, PCIDevice, qdev);
2558 PCIDevice *t;
2559 int slot_depth;
2560 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
2561 * 00 is added here to make this format compatible with
2562 * domain:Bus:Slot.Func for systems without nested PCI bridges.
2563 * Slot.Function list specifies the slot and function numbers for all
2564 * devices on the path from root to the specific device. */
2565 const char *root_bus_path;
2566 int root_bus_len;
2567 char slot[] = ":SS.F";
2568 int slot_len = sizeof slot - 1 /* For '\0' */;
2569 int path_len;
2570 char *path, *p;
2571 int s;
2572
2573 root_bus_path = pci_root_bus_path(d);
2574 root_bus_len = strlen(root_bus_path);
2575
2576 /* Calculate # of slots on path between device and root. */
2577 slot_depth = 0;
2578 for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2579 ++slot_depth;
2580 }
2581
2582 path_len = root_bus_len + slot_len * slot_depth;
2583
2584 /* Allocate memory, fill in the terminating null byte. */
2585 path = g_malloc(path_len + 1 /* For '\0' */);
2586 path[path_len] = '\0';
2587
2588 memcpy(path, root_bus_path, root_bus_len);
2589
2590 /* Fill in slot numbers. We walk up from device to root, so need to print
2591 * them in the reverse order, last to first. */
2592 p = path + path_len;
2593 for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2594 p -= slot_len;
2595 s = snprintf(slot, sizeof slot, ":%02x.%x",
2596 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
2597 assert(s == slot_len);
2598 memcpy(p, slot, slot_len);
2599 }
2600
2601 return path;
2602 }
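/*
 * Illustrative example of the resulting path (hypothetical topology): a
 * device at 03.0 behind a bridge at 00:1e.0 on root bus "0000:00" is
 * rendered as "0000:00:1e.0:03.0", while a device directly on the root bus
 * at 00:02.0 becomes "0000:00:02.0".
 */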
2603
2604 static int pci_qdev_find_recursive(PCIBus *bus,
2605 const char *id, PCIDevice **pdev)
2606 {
2607 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
2608 if (!qdev) {
2609 return -ENODEV;
2610 }
2611
2612 /* roughly check whether the given qdev is a PCI device */
2613 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
2614 *pdev = PCI_DEVICE(qdev);
2615 return 0;
2616 }
2617 return -EINVAL;
2618 }
2619
2620 int pci_qdev_find_device(const char *id, PCIDevice **pdev)
2621 {
2622 PCIHostState *host_bridge;
2623 int rc = -ENODEV;
2624
2625 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
2626 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
2627 if (!tmp) {
2628 rc = 0;
2629 break;
2630 }
2631 if (tmp != -ENODEV) {
2632 rc = tmp;
2633 }
2634 }
2635
2636 return rc;
2637 }
2638
2639 MemoryRegion *pci_address_space(PCIDevice *dev)
2640 {
2641 return pci_get_bus(dev)->address_space_mem;
2642 }
2643
2644 MemoryRegion *pci_address_space_io(PCIDevice *dev)
2645 {
2646 return pci_get_bus(dev)->address_space_io;
2647 }
2648
2649 static void pci_device_class_init(ObjectClass *klass, void *data)
2650 {
2651 DeviceClass *k = DEVICE_CLASS(klass);
2652
2653 k->realize = pci_qdev_realize;
2654 k->unrealize = pci_qdev_unrealize;
2655 k->bus_type = TYPE_PCI_BUS;
2656 device_class_set_props(k, pci_props);
2657 object_class_property_set_description(
2658 klass, "x-max-bounce-buffer-size",
2659 "Maximum buffer size allocated for bounce buffers used for mapped "
2660 "access to indirect DMA memory");
2661 }
2662
2663 static void pci_device_class_base_init(ObjectClass *klass, void *data)
2664 {
2665 if (!object_class_is_abstract(klass)) {
2666 ObjectClass *conventional =
2667 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
2668 ObjectClass *pcie =
2669 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
2670 ObjectClass *cxl =
2671 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
2672 assert(conventional || pcie || cxl);
2673 }
2674 }
2675
2676 /*
2677 * Get IOMMU root bus, aliased bus and devfn of a PCI device
2678 *
2679 * IOMMU root bus is needed by all call sites to call into iommu_ops.
2680 * For call sites which don't need aliased BDF, passing NULL to
2681 * aliased_[bus|devfn] is allowed.
2682 *
2683 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
2684 *
2685 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
2686 *
2687 * @aliased_devfn: return aliased devfn of the PCI device, optional.
2688 */
2689 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
2690 PCIBus **piommu_bus,
2691 PCIBus **aliased_bus,
2692 int *aliased_devfn)
2693 {
2694 PCIBus *bus = pci_get_bus(dev);
2695 PCIBus *iommu_bus = bus;
2696 int devfn = dev->devfn;
2697
2698 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
2699 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);
2700
2701 /*
2702 * The requester ID of the provided device may be aliased, as seen from
2703 * the IOMMU, due to topology limitations. The IOMMU relies on a
2704 * requester ID to provide a unique AddressSpace for devices, but
2705 * conventional PCI buses pre-date such concepts. Instead, the PCIe-
2706 * to-PCI bridge creates and accepts transactions on behalf of down-
2707 * stream devices. When doing so, all downstream devices are masked
2708 * (aliased) behind a single requester ID. The requester ID used
2709 * depends on the format of the bridge devices. Proper PCIe-to-PCI
2710 * bridges, with a PCIe capability indicating such, follow the
2711 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
2712 * where the bridge uses the secondary bus as the bridge portion of the
2713 * requester ID and devfn of 00.0. For other bridges, typically those
2714 * found on the root complex such as the dmi-to-pci-bridge, we follow
2715 * the convention of typical bare-metal hardware, which uses the
2716 * requester ID of the bridge itself. There are device specific
2717 * exceptions to these rules, but these are the defaults that the
2718 * Linux kernel uses when determining DMA aliases itself, and are believed
2719 * to be true for the bare metal equivalents of the devices emulated
2720 * in QEMU.
2721 */
2722 if (!pci_bus_is_express(iommu_bus)) {
2723 PCIDevice *parent = iommu_bus->parent_dev;
2724
2725 if (pci_is_express(parent) &&
2726 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
2727 devfn = PCI_DEVFN(0, 0);
2728 bus = iommu_bus;
2729 } else {
2730 devfn = parent->devfn;
2731 bus = parent_bus;
2732 }
2733 }
2734
2735 iommu_bus = parent_bus;
2736 }
2737
2738 assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
2739 assert(iommu_bus);
2740
2741 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
2742 iommu_bus = NULL;
2743 }
2744
2745 *piommu_bus = iommu_bus;
2746
2747 if (aliased_bus) {
2748 *aliased_bus = bus;
2749 }
2750
2751 if (aliased_devfn) {
2752 *aliased_devfn = devfn;
2753 }
2754 }
2755
2756 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
2757 {
2758 PCIBus *bus;
2759 PCIBus *iommu_bus;
2760 int devfn;
2761
2762 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
2763 if (iommu_bus) {
2764 return iommu_bus->iommu_ops->get_address_space(bus,
2765 iommu_bus->iommu_opaque, devfn);
2766 }
2767 return &address_space_memory;
2768 }
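/*
 * Usage sketch (illustrative only): code that needs the DMA view of a device
 * as seen through any vIOMMU can resolve the address space and access it
 * directly; most device models instead go through the pci_dma_*() helpers.
 *
 *     AddressSpace *as = pci_device_iommu_address_space(pdev);
 *     address_space_read(as, iova, MEMTXATTRS_UNSPECIFIED, buf, len);
 *
 * Without an IOMMU, or when the root bus has bypass_iommu set, this simply
 * returns &address_space_memory.
 */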
2769
2770 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
2771 Error **errp)
2772 {
2773 PCIBus *iommu_bus, *aliased_bus;
2774 int aliased_devfn;
2775
2776 /* set_iommu_device requires device's direct BDF instead of aliased BDF */
2777 pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
2778 &aliased_bus, &aliased_devfn);
2779 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
2780 hiod->aliased_bus = aliased_bus;
2781 hiod->aliased_devfn = aliased_devfn;
2782 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
2783 iommu_bus->iommu_opaque,
2784 dev->devfn, hiod, errp);
2785 }
2786 return true;
2787 }
2788
2789 void pci_device_unset_iommu_device(PCIDevice *dev)
2790 {
2791 PCIBus *iommu_bus;
2792
2793 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
2794 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
2795 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
2796 iommu_bus->iommu_opaque,
2797 dev->devfn);
2798 }
2799 }
2800
2801 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
2802 {
2803 /*
2804 * Callers of pci_setup_iommu() must provide at least a minimal set of
2805 * useful callbacks for the bus.
2806 */
2807 assert(ops);
2808 assert(ops->get_address_space);
2809
2810 bus->iommu_ops = ops;
2811 bus->iommu_opaque = opaque;
2812 }
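/*
 * Usage sketch (illustrative only): a vIOMMU model registers its callbacks
 * once on the bus it translates for; only get_address_space is mandatory.
 * The my_iommu_* names are hypothetical.
 *
 *     static const PCIIOMMUOps my_iommu_ops = {
 *         .get_address_space = my_iommu_get_address_space,
 *     };
 *
 *     pci_setup_iommu(bus, &my_iommu_ops, my_iommu_state);
 */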
2813
2814 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
2815 {
2816 Range *range = opaque;
2817 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
2818 int i;
2819
2820 if (!(cmd & PCI_COMMAND_MEMORY)) {
2821 return;
2822 }
2823
2824 if (IS_PCI_BRIDGE(dev)) {
2825 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
2826 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
2827
2828 base = MAX(base, 0x1ULL << 32);
2829
2830 if (limit >= base) {
2831 Range pref_range;
2832 range_set_bounds(&pref_range, base, limit);
2833 range_extend(range, &pref_range);
2834 }
2835 }
2836 for (i = 0; i < PCI_NUM_REGIONS; ++i) {
2837 PCIIORegion *r = &dev->io_regions[i];
2838 pcibus_t lob, upb;
2839 Range region_range;
2840
2841 if (!r->size ||
2842 (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
2843 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
2844 continue;
2845 }
2846
2847 lob = pci_bar_address(dev, i, r->type, r->size);
2848 upb = lob + r->size - 1;
2849 if (lob == PCI_BAR_UNMAPPED) {
2850 continue;
2851 }
2852
2853 lob = MAX(lob, 0x1ULL << 32);
2854
2855 if (upb >= lob) {
2856 range_set_bounds(&region_range, lob, upb);
2857 range_extend(range, &region_range);
2858 }
2859 }
2860 }
2861
2862 void pci_bus_get_w64_range(PCIBus *bus, Range *range)
2863 {
2864 range_make_empty(range);
2865 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
2866 }
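/*
 * Usage sketch (illustrative only): firmware/ACPI table code can use this to
 * size the 64-bit PCI window; an empty range means nothing above 4 GiB is
 * currently programmed.
 *
 *     Range w64;
 *     pci_bus_get_w64_range(bus, &w64);
 *     if (!range_is_empty(&w64)) {
 *         uint64_t base = range_lob(&w64);
 *         uint64_t limit = range_upb(&w64);
 *         // ... program the 64-bit window from [base, limit] ...
 *     }
 */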
2867
2868 static bool pcie_has_upstream_port(PCIDevice *dev)
2869 {
2870 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
2871
2872 /* Device associated with an upstream port.
2873 * As there are several types of these, it's easier to check the
2874 * parent device: upstream ports are always connected to
2875 * root or downstream ports.
2876 */
2877 return parent_dev &&
2878 pci_is_express(parent_dev) &&
2879 parent_dev->exp.exp_cap &&
2880 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
2881 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
2882 }
2883
2884 PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
2885 {
2886 PCIBus *bus = pci_get_bus(pci_dev);
2887
2888 if (pcie_has_upstream_port(pci_dev)) {
2889 /* With an upstream PCIe port, we only support 1 device at slot 0 */
2890 return bus->devices[0];
2891 } else {
2892 /* Other bus types might support multiple devices at slots 0-31 */
2893 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
2894 }
2895 }
2896
2897 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
2898 {
2899 MSIMessage msg;
2900 if (msix_enabled(dev)) {
2901 msg = msix_get_message(dev, vector);
2902 } else if (msi_enabled(dev)) {
2903 msg = msi_get_message(dev, vector);
2904 } else {
2905 /* Should never happen */
2906 error_report("%s: unknown interrupt type", __func__);
2907 abort();
2908 }
2909 return msg;
2910 }
2911
2912 void pci_set_power(PCIDevice *d, bool state)
2913 {
2914 if (d->has_power == state) {
2915 return;
2916 }
2917
2918 d->has_power = state;
2919 pci_update_mappings(d);
2920 memory_region_set_enabled(&d->bus_master_enable_region,
2921 (pci_get_word(d->config + PCI_COMMAND)
2922 & PCI_COMMAND_MASTER) && d->has_power);
2923 if (!d->has_power) {
2924 pci_device_reset(d);
2925 }
2926 }
2927
2928 static const TypeInfo pci_device_type_info = {
2929 .name = TYPE_PCI_DEVICE,
2930 .parent = TYPE_DEVICE,
2931 .instance_size = sizeof(PCIDevice),
2932 .abstract = true,
2933 .class_size = sizeof(PCIDeviceClass),
2934 .class_init = pci_device_class_init,
2935 .class_base_init = pci_device_class_base_init,
2936 };
2937
2938 static void pci_register_types(void)
2939 {
2940 type_register_static(&pci_bus_info);
2941 type_register_static(&pcie_bus_info);
2942 type_register_static(&cxl_bus_info);
2943 type_register_static(&conventional_pci_interface_info);
2944 type_register_static(&cxl_interface_info);
2945 type_register_static(&pcie_interface_info);
2946 type_register_static(&pci_device_type_info);
2947 }
2948
2949 type_init(pci_register_types)
2950