xref: /qemu/hw/vfio/pci-quirks.c (revision b65cb867)
1 /*
2  * device quirks for PCI devices
3  *
4  * Copyright Red Hat, Inc. 2012-2015
5  *
6  * Authors:
7  *  Alex Williamson <alex.williamson@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qemu/units.h"
15 #include "qemu/error-report.h"
16 #include "qemu/main-loop.h"
17 #include "qemu/module.h"
18 #include "qemu/range.h"
19 #include "qapi/error.h"
20 #include "qapi/visitor.h"
21 #include <sys/ioctl.h>
22 #include "hw/hw.h"
23 #include "hw/nvram/fw_cfg.h"
24 #include "hw/qdev-properties.h"
25 #include "pci.h"
26 #include "trace.h"
27 
/* Use uint32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */
29 static bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t device)
30 {
31     return (vendor == PCI_ANY_ID || vendor == vdev->vendor_id) &&
32            (device == PCI_ANY_ID || device == vdev->device_id);
33 }
34 
35 static bool vfio_is_vga(VFIOPCIDevice *vdev)
36 {
37     PCIDevice *pdev = &vdev->pdev;
38     uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
39 
40     return class == PCI_CLASS_DISPLAY_VGA;
41 }
42 
/*
 * List of vendor/device ids for which to disable option rom loading.
 * This avoids guest hangs during rom execution, as noticed with the
 * BCM 57810 card, for lack of a better way to handle such issues.
 * The user can still override by specifying a romfile or rombar=1.
 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
 * for an analysis of the 57810 card hang. When adding
 * a new vendor id/device id combination below, please also add
 * your card/environment details and information that could
 * help in debugging to the bug tracking this issue
 */
/* Matched against devices via vfio_pci_is(); PCI_ANY_ID wildcards allowed. */
static const struct {
    uint32_t vendor;
    uint32_t device;
} romblacklist[] = {
    { 0x14e4, 0x168e }, /* Broadcom BCM 57810 */
};
62 
63 bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
64 {
65     int i;
66 
67     for (i = 0 ; i < ARRAY_SIZE(romblacklist); i++) {
68         if (vfio_pci_is(vdev, romblacklist[i].vendor, romblacklist[i].device)) {
69             trace_vfio_quirk_rom_blacklisted(vdev->vbasedev.name,
70                                              romblacklist[i].vendor,
71                                              romblacklist[i].device);
72             return true;
73         }
74     }
75     return false;
76 }
77 
78 /*
79  * Device specific region quirks (mostly backdoors to PCI config space)
80  */
81 
82 /*
83  * The generic window quirks operate on an address and data register,
84  * vfio_generic_window_address_quirk handles the address register and
85  * vfio_generic_window_data_quirk handles the data register.  These ops
86  * pass reads and writes through to hardware until a value matching the
87  * stored address match/mask is written.  When this occurs, the data
88  * register access emulated PCI config space for the device rather than
89  * passing through accesses.  This enables devices where PCI config space
90  * is accessible behind a window register to maintain the virtualization
91  * provided through vfio.
92  */
/* An address register value matches when (value & ~mask) == match. */
typedef struct VFIOConfigWindowMatch {
    uint32_t match;
    uint32_t mask;
} VFIOConfigWindowMatch;
97 
/*
 * State for one address/data window quirk instance.  The matches[]
 * flexible array holds the address patterns that arm the window.
 */
typedef struct VFIOConfigWindowQuirk {
    struct VFIOPCIDevice *vdev;

    /* Config space offset latched by the last matching address write. */
    uint32_t address_val;

    /* Offsets of the address and data registers within the BAR. */
    uint32_t address_offset;
    uint32_t data_offset;

    /* True while data register accesses are redirected to config space. */
    bool window_enabled;
    uint8_t bar;

    MemoryRegion *addr_mem;
    MemoryRegion *data_mem;

    uint32_t nr_matches;
    VFIOConfigWindowMatch matches[];
} VFIOConfigWindowQuirk;
115 
116 static uint64_t vfio_generic_window_quirk_address_read(void *opaque,
117                                                        hwaddr addr,
118                                                        unsigned size)
119 {
120     VFIOConfigWindowQuirk *window = opaque;
121     VFIOPCIDevice *vdev = window->vdev;
122 
123     return vfio_region_read(&vdev->bars[window->bar].region,
124                             addr + window->address_offset, size);
125 }
126 
127 static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr,
128                                                     uint64_t data,
129                                                     unsigned size)
130 {
131     VFIOConfigWindowQuirk *window = opaque;
132     VFIOPCIDevice *vdev = window->vdev;
133     int i;
134 
135     window->window_enabled = false;
136 
137     vfio_region_write(&vdev->bars[window->bar].region,
138                       addr + window->address_offset, data, size);
139 
140     for (i = 0; i < window->nr_matches; i++) {
141         if ((data & ~window->matches[i].mask) == window->matches[i].match) {
142             window->window_enabled = true;
143             window->address_val = data & window->matches[i].mask;
144             trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name,
145                                     memory_region_name(window->addr_mem), data);
146             break;
147         }
148     }
149 }
150 
/* Little-endian MMIO/PIO ops for the window address register. */
static const MemoryRegionOps vfio_generic_window_address_quirk = {
    .read = vfio_generic_window_quirk_address_read,
    .write = vfio_generic_window_quirk_address_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
156 
157 static uint64_t vfio_generic_window_quirk_data_read(void *opaque,
158                                                     hwaddr addr, unsigned size)
159 {
160     VFIOConfigWindowQuirk *window = opaque;
161     VFIOPCIDevice *vdev = window->vdev;
162     uint64_t data;
163 
164     /* Always read data reg, discard if window enabled */
165     data = vfio_region_read(&vdev->bars[window->bar].region,
166                             addr + window->data_offset, size);
167 
168     if (window->window_enabled) {
169         data = vfio_pci_read_config(&vdev->pdev, window->address_val, size);
170         trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name,
171                                     memory_region_name(window->data_mem), data);
172     }
173 
174     return data;
175 }
176 
177 static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr,
178                                                  uint64_t data, unsigned size)
179 {
180     VFIOConfigWindowQuirk *window = opaque;
181     VFIOPCIDevice *vdev = window->vdev;
182 
183     if (window->window_enabled) {
184         vfio_pci_write_config(&vdev->pdev, window->address_val, data, size);
185         trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name,
186                                     memory_region_name(window->data_mem), data);
187         return;
188     }
189 
190     vfio_region_write(&vdev->bars[window->bar].region,
191                       addr + window->data_offset, data, size);
192 }
193 
/* Little-endian MMIO/PIO ops for the window data register. */
static const MemoryRegionOps vfio_generic_window_data_quirk = {
    .read = vfio_generic_window_quirk_data_read,
    .write = vfio_generic_window_quirk_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
199 
200 /*
201  * The generic mirror quirk handles devices which expose PCI config space
202  * through a region within a BAR.  When enabled, reads and writes are
203  * redirected through to emulated PCI config space.  XXX if PCI config space
204  * used memory regions, this could just be an alias.
205  */
/*
 * State for a mirror quirk: accesses at @offset within @bar are
 * redirected to emulated PCI config space.
 */
typedef struct VFIOConfigMirrorQuirk {
    struct VFIOPCIDevice *vdev;
    uint32_t offset;    /* start of the config space mirror within the BAR */
    uint8_t bar;
    MemoryRegion *mem;
    uint8_t data[];     /* opaque per-quirk data; usage not visible here */
} VFIOConfigMirrorQuirk;
213 
214 static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
215                                                hwaddr addr, unsigned size)
216 {
217     VFIOConfigMirrorQuirk *mirror = opaque;
218     VFIOPCIDevice *vdev = mirror->vdev;
219     uint64_t data;
220 
221     /* Read and discard in case the hardware cares */
222     (void)vfio_region_read(&vdev->bars[mirror->bar].region,
223                            addr + mirror->offset, size);
224 
225     data = vfio_pci_read_config(&vdev->pdev, addr, size);
226     trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
227                                          memory_region_name(mirror->mem),
228                                          addr, data);
229     return data;
230 }
231 
232 static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr,
233                                             uint64_t data, unsigned size)
234 {
235     VFIOConfigMirrorQuirk *mirror = opaque;
236     VFIOPCIDevice *vdev = mirror->vdev;
237 
238     vfio_pci_write_config(&vdev->pdev, addr, data, size);
239     trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
240                                           memory_region_name(mirror->mem),
241                                           addr, data);
242 }
243 
/* Little-endian ops redirecting a BAR range to emulated config space. */
static const MemoryRegionOps vfio_generic_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_generic_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
249 
/*
 * Is range1 fully contained within range2?
 *
 * Written without computing first + len sums: those can wrap around
 * uint64_t for ranges near the top of the address space and falsely
 * report containment.  A zero-length range1 is contained whenever its
 * start lies within (or one past the end of) range2.
 */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return first1 >= first2 && len1 <= len2 &&
           first1 - first2 <= len2 - len1;
}
255 
/* PCI vendor ID used by the ATI/AMD quirks below. */
#define PCI_VENDOR_ID_ATI               0x1002
257 
258 /*
259  * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
260  * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
261  * BAR4 (older cards like the X550 used BAR1, but we don't care to support
262  * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
263  * I/O port BAR address.  Originally this was coded to return the virtual BAR
264  * address only if the physical register read returns the actual BAR address,
265  * but users have reported greater success if we return the virtual address
266  * unconditionally.
267  */
268 static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
269                                         hwaddr addr, unsigned size)
270 {
271     VFIOPCIDevice *vdev = opaque;
272     uint64_t data = vfio_pci_read_config(&vdev->pdev,
273                                          PCI_BASE_ADDRESS_4 + 1, size);
274 
275     trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data);
276 
277     return data;
278 }
279 
/* Read-only ops for the VGA 0x3c3 byte; writes are not intercepted. */
static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
284 
285 static VFIOQuirk *vfio_quirk_alloc(int nr_mem)
286 {
287     VFIOQuirk *quirk = g_new0(VFIOQuirk, 1);
288     QLIST_INIT(&quirk->ioeventfds);
289     quirk->mem = g_new0(MemoryRegion, nr_mem);
290     quirk->nr_mem = nr_mem;
291 
292     return quirk;
293 }
294 
/*
 * Tear down one ioeventfd: unlink it, remove the KVM eventfd binding,
 * and release whichever backend (vfio kernel acceleration or a QEMU
 * fd handler) was servicing it.
 */
static void vfio_ioeventfd_exit(VFIOPCIDevice *vdev, VFIOIOEventFD *ioeventfd)
{
    QLIST_REMOVE(ioeventfd, next);
    memory_region_del_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
                              true, ioeventfd->data, &ioeventfd->e);

    if (ioeventfd->vfio) {
        struct vfio_device_ioeventfd vfio_ioeventfd;

        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
        vfio_ioeventfd.flags = ioeventfd->size;
        vfio_ioeventfd.data = ioeventfd->data;
        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
                                ioeventfd->region_addr;
        /* fd = -1 asks the vfio driver to tear down the matching entry. */
        vfio_ioeventfd.fd = -1;

        if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) {
            error_report("Failed to remove vfio ioeventfd for %s+0x%"
                         HWADDR_PRIx"[%d]:0x%"PRIx64" (%m)",
                         memory_region_name(ioeventfd->mr), ioeventfd->addr,
                         ioeventfd->size, ioeventfd->data);
        }
    } else {
        /* Userspace handler: detach it from the event loop. */
        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
                            NULL, NULL, NULL);
    }

    event_notifier_cleanup(&ioeventfd->e);
    trace_vfio_ioeventfd_exit(memory_region_name(ioeventfd->mr),
                              (uint64_t)ioeventfd->addr, ioeventfd->size,
                              ioeventfd->data);
    g_free(ioeventfd);
}
328 
329 static void vfio_drop_dynamic_eventfds(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
330 {
331     VFIOIOEventFD *ioeventfd, *tmp;
332 
333     QLIST_FOREACH_SAFE(ioeventfd, &quirk->ioeventfds, next, tmp) {
334         if (ioeventfd->dynamic) {
335             vfio_ioeventfd_exit(vdev, ioeventfd);
336         }
337     }
338 }
339 
340 static void vfio_ioeventfd_handler(void *opaque)
341 {
342     VFIOIOEventFD *ioeventfd = opaque;
343 
344     if (event_notifier_test_and_clear(&ioeventfd->e)) {
345         vfio_region_write(ioeventfd->region, ioeventfd->region_addr,
346                           ioeventfd->data, ioeventfd->size);
347         trace_vfio_ioeventfd_handler(memory_region_name(ioeventfd->mr),
348                                      (uint64_t)ioeventfd->addr, ioeventfd->size,
349                                      ioeventfd->data);
350     }
351 }
352 
/*
 * Create an ioeventfd for writes of @data/@size at @addr within @mr.
 * The eventfd is first offered to the vfio kernel driver so the write
 * can be serviced without returning to userspace; when that fails (or
 * is disabled) a userspace handler replays the write through @region
 * at @region_addr.  Returns NULL when KVM ioeventfds are disabled for
 * this device or notifier setup fails.
 */
static VFIOIOEventFD *vfio_ioeventfd_init(VFIOPCIDevice *vdev,
                                          MemoryRegion *mr, hwaddr addr,
                                          unsigned size, uint64_t data,
                                          VFIORegion *region,
                                          hwaddr region_addr, bool dynamic)
{
    VFIOIOEventFD *ioeventfd;

    if (vdev->no_kvm_ioeventfd) {
        return NULL;
    }

    ioeventfd = g_malloc0(sizeof(*ioeventfd));

    if (event_notifier_init(&ioeventfd->e, 0)) {
        g_free(ioeventfd);
        return NULL;
    }

    /*
     * MemoryRegion and relative offset, plus additional ioeventfd setup
     * parameters for configuring and later tearing down KVM ioeventfd.
     */
    ioeventfd->mr = mr;
    ioeventfd->addr = addr;
    ioeventfd->size = size;
    ioeventfd->data = data;
    ioeventfd->dynamic = dynamic;
    /*
     * VFIORegion and relative offset for implementing the userspace
     * handler.  data & size fields shared for both uses.
     */
    ioeventfd->region = region;
    ioeventfd->region_addr = region_addr;

    if (!vdev->no_vfio_ioeventfd) {
        struct vfio_device_ioeventfd vfio_ioeventfd;

        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
        vfio_ioeventfd.flags = ioeventfd->size;
        vfio_ioeventfd.data = ioeventfd->data;
        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
                                ioeventfd->region_addr;
        vfio_ioeventfd.fd = event_notifier_get_fd(&ioeventfd->e);

        /* ->vfio records whether the kernel accepted the eventfd. */
        ioeventfd->vfio = !ioctl(vdev->vbasedev.fd,
                                 VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd);
    }

    if (!ioeventfd->vfio) {
        /* Fall back to servicing the eventfd in QEMU's event loop. */
        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
                            vfio_ioeventfd_handler, NULL, ioeventfd);
    }

    memory_region_add_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
                              true, ioeventfd->data, &ioeventfd->e);
    trace_vfio_ioeventfd_init(memory_region_name(mr), (uint64_t)addr,
                              size, data, ioeventfd->vfio);

    return ioeventfd;
}
414 
/*
 * Install the ATI 0x3c3 quirk: expose the upper byte of the virtual
 * I/O port BAR4 address through VGA register 0x3c3 (see the comment
 * above vfio_ati_3c3_quirk_read).
 */
static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
        return;
    }

    quirk = vfio_quirk_alloc(1);

    /* Single byte-wide region overlaying 0x3c3 in the VGA high I/O range. */
    memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name);
}
440 
441 /*
442  * Newer ATI/AMD devices, including HD5450 and HD7850, have a mirror to PCI
443  * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
444  * the MMIO space directly, but a window to this space is provided through
445  * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
446  * data register.  When the address is programmed to a range of 0x4000-0x4fff
447  * PCI configuration space is available.  Experimentation seems to indicate
448  * that read-only may be provided by hardware.
449  */
/*
 * Install the ATI BAR4 window quirk (see the comment above): a 4-byte
 * address register at offset 0 and a data register at offset 4 overlap
 * the passthrough BAR mapping with higher priority.
 */
static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigWindowQuirk *window;

    /* This window doesn't seem to be used except by legacy VGA code */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->vga || nr != 4) {
        return;
    }

    /* One match entry: config space is mirrored at 0x4000. */
    quirk = vfio_quirk_alloc(2);
    window = quirk->data = g_malloc0(sizeof(*window) +
                                     sizeof(VFIOConfigWindowMatch));
    window->vdev = vdev;
    window->address_offset = 0;
    window->data_offset = 4;
    window->nr_matches = 1;
    window->matches[0].match = 0x4000;
    window->matches[0].mask = vdev->config_size - 1;
    window->bar = nr;
    window->addr_mem = &quirk->mem[0];
    window->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-ati-bar4-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-ati-bar4-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name);
}
492 
/*
 * Trap the BAR2 MMIO mirror to config space as well.
 */
static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOConfigMirrorQuirk *mirror;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
        !vdev->vga || nr != 2 || !vdev->bars[2].mem64) {
        return;
    }

    quirk = vfio_quirk_alloc(1);
    mirror = quirk->data = g_malloc0(sizeof(*mirror));
    mirror->mem = quirk->mem;
    mirror->vdev = vdev;
    mirror->offset = 0x4000;    /* config space mirror lives at BAR2+0x4000 */
    mirror->bar = nr;

    /* Overlap the passthrough mapping with the config space mirror. */
    memory_region_init_io(mirror->mem, OBJECT(vdev),
                          &vfio_generic_mirror_quirk, mirror,
                          "vfio-ati-bar2-4000-quirk", PCI_CONFIG_SPACE_SIZE);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        mirror->offset, mirror->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_ati_bar2_probe(vdev->vbasedev.name);
}
524 
525 /*
526  * Older ATI/AMD cards like the X550 have a similar window to that above.
527  * I/O port BAR1 provides a window to a mirror of PCI config space located
528  * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
529  * note it for future reference.
530  */
531 
532 /*
533  * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
535  * https://github.com/pathscale/envytools/tree/master/hwdocs
536  *
537  * The first quirk is actually not documented in envytools and is found
538  * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
539  * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
540  * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
541  * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
542  * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
543  * is written for a write to 0x3d4.  The BAR0 offset is then accessible
544  * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
545  * that use the I/O port BAR5 window but it doesn't hurt to leave it.
546  */
/* Handshake states for the NVIDIA 0x3d4/0x3d0 config space backdoor. */
typedef enum {NONE = 0, SELECT, WINDOW, READ, WRITE} VFIONvidia3d0State;
/* Names indexed by VFIONvidia3d0State, for tracing. */
static const char *nv3d0_states[] = { "NONE", "SELECT",
                                      "WINDOW", "READ", "WRITE" };
550 
typedef struct VFIONvidia3d0Quirk {
    VFIOPCIDevice *vdev;
    VFIONvidia3d0State state;   /* current backdoor handshake state */
    uint32_t offset;            /* target offset latched after SELECT */
} VFIONvidia3d0Quirk;
556 
557 static uint64_t vfio_nvidia_3d4_quirk_read(void *opaque,
558                                            hwaddr addr, unsigned size)
559 {
560     VFIONvidia3d0Quirk *quirk = opaque;
561     VFIOPCIDevice *vdev = quirk->vdev;
562 
563     quirk->state = NONE;
564 
565     return vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
566                          addr + 0x14, size);
567 }
568 
569 static void vfio_nvidia_3d4_quirk_write(void *opaque, hwaddr addr,
570                                         uint64_t data, unsigned size)
571 {
572     VFIONvidia3d0Quirk *quirk = opaque;
573     VFIOPCIDevice *vdev = quirk->vdev;
574     VFIONvidia3d0State old_state = quirk->state;
575 
576     quirk->state = NONE;
577 
578     switch (data) {
579     case 0x338:
580         if (old_state == NONE) {
581             quirk->state = SELECT;
582             trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
583                                               nv3d0_states[quirk->state]);
584         }
585         break;
586     case 0x538:
587         if (old_state == WINDOW) {
588             quirk->state = READ;
589             trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
590                                               nv3d0_states[quirk->state]);
591         }
592         break;
593     case 0x738:
594         if (old_state == WINDOW) {
595             quirk->state = WRITE;
596             trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
597                                               nv3d0_states[quirk->state]);
598         }
599         break;
600     }
601 
602     vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
603                    addr + 0x14, data, size);
604 }
605 
/* Little-endian ops for the VGA 0x3d4 port driving the state machine. */
static const MemoryRegionOps vfio_nvidia_3d4_quirk = {
    .read = vfio_nvidia_3d4_quirk_read,
    .write = vfio_nvidia_3d4_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
611 
612 static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
613                                            hwaddr addr, unsigned size)
614 {
615     VFIONvidia3d0Quirk *quirk = opaque;
616     VFIOPCIDevice *vdev = quirk->vdev;
617     VFIONvidia3d0State old_state = quirk->state;
618     uint64_t data = vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
619                                   addr + 0x10, size);
620 
621     quirk->state = NONE;
622 
623     if (old_state == READ &&
624         (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
625         uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);
626 
627         data = vfio_pci_read_config(&vdev->pdev, offset, size);
628         trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name,
629                                          offset, size, data);
630     }
631 
632     return data;
633 }
634 
635 static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
636                                         uint64_t data, unsigned size)
637 {
638     VFIONvidia3d0Quirk *quirk = opaque;
639     VFIOPCIDevice *vdev = quirk->vdev;
640     VFIONvidia3d0State old_state = quirk->state;
641 
642     quirk->state = NONE;
643 
644     if (old_state == SELECT) {
645         quirk->offset = (uint32_t)data;
646         quirk->state = WINDOW;
647         trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
648                                           nv3d0_states[quirk->state]);
649     } else if (old_state == WRITE) {
650         if ((quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
651             uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);
652 
653             vfio_pci_write_config(&vdev->pdev, offset, data, size);
654             trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name,
655                                               offset, data, size);
656             return;
657         }
658     }
659 
660     vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
661                    addr + 0x10, data, size);
662 }
663 
/* Little-endian ops for the VGA 0x3d0 data port of the backdoor. */
static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
669 
/*
 * Install the NVIDIA 0x3d4/0x3d0 backdoor quirk (see the comment
 * above), trapping both ports with 2-byte I/O regions in the VGA
 * high I/O range.
 */
static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
{
    VFIOQuirk *quirk;
    VFIONvidia3d0Quirk *data;

    if (vdev->no_geforce_quirks ||
        !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->bars[1].region.size) {
        return;
    }

    quirk = vfio_quirk_alloc(2);
    quirk->data = data = g_malloc0(sizeof(*data));
    data->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk,
                          data, "vfio-nvidia-3d4-quirk", 2);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          data, "vfio-nvidia-3d0-quirk", 2);
    memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                                0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]);

    QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name);
}
700 
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space, including extended space, which is available in the 4k @0x88000.
 */
/*
 * BAR5 window quirk state.  The window regions are only enabled while
 * the low bit of both the master and enable registers is set.
 */
typedef struct VFIONvidiaBAR5Quirk {
    uint32_t master;
    uint32_t enable;
    MemoryRegion *addr_mem;
    MemoryRegion *data_mem;
    bool enabled;
    VFIOConfigWindowQuirk window; /* last for match data */
} VFIONvidiaBAR5Quirk;
716 
717 static void vfio_nvidia_bar5_enable(VFIONvidiaBAR5Quirk *bar5)
718 {
719     VFIOPCIDevice *vdev = bar5->window.vdev;
720 
721     if (((bar5->master & bar5->enable) & 0x1) == bar5->enabled) {
722         return;
723     }
724 
725     bar5->enabled = !bar5->enabled;
726     trace_vfio_quirk_nvidia_bar5_state(vdev->vbasedev.name,
727                                        bar5->enabled ?  "Enable" : "Disable");
728     memory_region_set_enabled(bar5->addr_mem, bar5->enabled);
729     memory_region_set_enabled(bar5->data_mem, bar5->enabled);
730 }
731 
732 static uint64_t vfio_nvidia_bar5_quirk_master_read(void *opaque,
733                                                    hwaddr addr, unsigned size)
734 {
735     VFIONvidiaBAR5Quirk *bar5 = opaque;
736     VFIOPCIDevice *vdev = bar5->window.vdev;
737 
738     return vfio_region_read(&vdev->bars[5].region, addr, size);
739 }
740 
741 static void vfio_nvidia_bar5_quirk_master_write(void *opaque, hwaddr addr,
742                                                 uint64_t data, unsigned size)
743 {
744     VFIONvidiaBAR5Quirk *bar5 = opaque;
745     VFIOPCIDevice *vdev = bar5->window.vdev;
746 
747     vfio_region_write(&vdev->bars[5].region, addr, data, size);
748 
749     bar5->master = data;
750     vfio_nvidia_bar5_enable(bar5);
751 }
752 
/* Ops for the BAR5 master register at offset 0. */
static const MemoryRegionOps vfio_nvidia_bar5_quirk_master = {
    .read = vfio_nvidia_bar5_quirk_master_read,
    .write = vfio_nvidia_bar5_quirk_master_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
758 
759 static uint64_t vfio_nvidia_bar5_quirk_enable_read(void *opaque,
760                                                    hwaddr addr, unsigned size)
761 {
762     VFIONvidiaBAR5Quirk *bar5 = opaque;
763     VFIOPCIDevice *vdev = bar5->window.vdev;
764 
765     return vfio_region_read(&vdev->bars[5].region, addr + 4, size);
766 }
767 
768 static void vfio_nvidia_bar5_quirk_enable_write(void *opaque, hwaddr addr,
769                                                 uint64_t data, unsigned size)
770 {
771     VFIONvidiaBAR5Quirk *bar5 = opaque;
772     VFIOPCIDevice *vdev = bar5->window.vdev;
773 
774     vfio_region_write(&vdev->bars[5].region, addr + 4, data, size);
775 
776     bar5->enable = data;
777     vfio_nvidia_bar5_enable(bar5);
778 }
779 
/* Ops for the BAR5 enable register at offset 4. */
static const MemoryRegionOps vfio_nvidia_bar5_quirk_enable = {
    .read = vfio_nvidia_bar5_quirk_enable_read,
    .write = vfio_nvidia_bar5_quirk_enable_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
785 
/*
 * Install the NVIDIA BAR5 window quirk (see the comment above).  Four
 * regions are used: the generic window address/data pair at offsets
 * 0x8/0xc (initially disabled), plus master/enable registers at
 * offsets 0x0/0x4 that gate the window via vfio_nvidia_bar5_enable().
 */
static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIONvidiaBAR5Quirk *bar5;
    VFIOConfigWindowQuirk *window;

    if (vdev->no_geforce_quirks ||
        !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
        !vdev->vga || nr != 5 || !vdev->bars[5].ioport) {
        return;
    }

    /* Two match entries: 256 bytes @0x1800 and full config space @0x88000. */
    quirk = vfio_quirk_alloc(4);
    bar5 = quirk->data = g_malloc0(sizeof(*bar5) +
                                   (sizeof(VFIOConfigWindowMatch) * 2));
    window = &bar5->window;

    window->vdev = vdev;
    window->address_offset = 0x8;
    window->data_offset = 0xc;
    window->nr_matches = 2;
    window->matches[0].match = 0x1800;
    window->matches[0].mask = PCI_CONFIG_SPACE_SIZE - 1;
    window->matches[1].match = 0x88000;
    window->matches[1].mask = vdev->config_size - 1;
    window->bar = nr;
    window->addr_mem = bar5->addr_mem = &quirk->mem[0];
    window->data_mem = bar5->data_mem = &quirk->mem[1];

    memory_region_init_io(window->addr_mem, OBJECT(vdev),
                          &vfio_generic_window_address_quirk, window,
                          "vfio-nvidia-bar5-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->address_offset,
                                        window->addr_mem, 1);
    memory_region_set_enabled(window->addr_mem, false);

    memory_region_init_io(window->data_mem, OBJECT(vdev),
                          &vfio_generic_window_data_quirk, window,
                          "vfio-nvidia-bar5-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        window->data_offset,
                                        window->data_mem, 1);
    memory_region_set_enabled(window->data_mem, false);

    memory_region_init_io(&quirk->mem[2], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_master, bar5,
                          "vfio-nvidia-bar5-master-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0, &quirk->mem[2], 1);

    memory_region_init_io(&quirk->mem[3], OBJECT(vdev),
                          &vfio_nvidia_bar5_quirk_enable, bar5,
                          "vfio-nvidia-bar5-enable-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        4, &quirk->mem[3], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_nvidia_bar5_probe(vdev->vbasedev.name);
}
847 
/* Tracking state for detecting repeated writes worth an ioeventfd. */
typedef struct LastDataSet {
    VFIOQuirk *quirk;   /* Owning quirk; new ioeventfds chain off of it */
    hwaddr addr;        /* Address of the last observed write */
    uint64_t data;      /* Data value of the last observed write */
    unsigned size;      /* Access size of the last observed write */
    int hits;           /* Consecutive identical writes seen so far */
    int added;          /* ioeventfds added; saturates at max + 1 (warned) */
} LastDataSet;

#define MAX_DYN_IOEVENTFD 10  /* Cap on dynamically added ioeventfds */
#define HITS_FOR_IOEVENTFD 10 /* Identical writes required to add one */
859 
860 /*
861  * Finally, BAR0 itself.  We want to redirect any accesses to either
862  * 0x1800 or 0x88000 through the PCI config space access functions.
863  */
864 static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr,
865                                            uint64_t data, unsigned size)
866 {
867     VFIOConfigMirrorQuirk *mirror = opaque;
868     VFIOPCIDevice *vdev = mirror->vdev;
869     PCIDevice *pdev = &vdev->pdev;
870     LastDataSet *last = (LastDataSet *)&mirror->data;
871 
872     vfio_generic_quirk_mirror_write(opaque, addr, data, size);
873 
874     /*
875      * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
876      * MSI capability ID register.  Both the ID and next register are
877      * read-only, so we allow writes covering either of those to real hw.
878      */
879     if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
880         vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
881         vfio_region_write(&vdev->bars[mirror->bar].region,
882                           addr + mirror->offset, data, size);
883         trace_vfio_quirk_nvidia_bar0_msi_ack(vdev->vbasedev.name);
884     }
885 
886     /*
887      * Automatically add an ioeventfd to handle any repeated write with the
888      * same data and size above the standard PCI config space header.  This is
889      * primarily expected to accelerate the MSI-ACK behavior, such as noted
890      * above.  Current hardware/drivers should trigger an ioeventfd at config
891      * offset 0x704 (region offset 0x88704), with data 0x0, size 4.
892      *
893      * The criteria of 10 successive hits is arbitrary but reliably adds the
894      * MSI-ACK region.  Note that as some writes are bypassed via the ioeventfd,
895      * the remaining ones have a greater chance of being seen successively.
896      * To avoid the pathological case of burning up all of QEMU's open file
897      * handles, arbitrarily limit this algorithm from adding no more than 10
898      * ioeventfds, print an error if we would have added an 11th, and then
899      * stop counting.
900      */
901     if (!vdev->no_kvm_ioeventfd &&
902         addr >= PCI_STD_HEADER_SIZEOF && last->added <= MAX_DYN_IOEVENTFD) {
903         if (addr != last->addr || data != last->data || size != last->size) {
904             last->addr = addr;
905             last->data = data;
906             last->size = size;
907             last->hits = 1;
908         } else if (++last->hits >= HITS_FOR_IOEVENTFD) {
909             if (last->added < MAX_DYN_IOEVENTFD) {
910                 VFIOIOEventFD *ioeventfd;
911                 ioeventfd = vfio_ioeventfd_init(vdev, mirror->mem, addr, size,
912                                         data, &vdev->bars[mirror->bar].region,
913                                         mirror->offset + addr, true);
914                 if (ioeventfd) {
915                     VFIOQuirk *quirk = last->quirk;
916 
917                     QLIST_INSERT_HEAD(&quirk->ioeventfds, ioeventfd, next);
918                     last->added++;
919                 }
920             } else {
921                 last->added++;
922                 warn_report("NVIDIA ioeventfd queue full for %s, unable to "
923                             "accelerate 0x%"HWADDR_PRIx", data 0x%"PRIx64", "
924                             "size %u", vdev->vbasedev.name, addr, data, size);
925             }
926         }
927     }
928 }
929 
/* BAR0 mirror ops: generic mirrored reads, writes add MSI-ACK/ioeventfd. */
static const MemoryRegionOps vfio_nvidia_mirror_quirk = {
    .read = vfio_generic_quirk_mirror_read,
    .write = vfio_nvidia_quirk_mirror_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
935 
936 static void vfio_nvidia_bar0_quirk_reset(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
937 {
938     VFIOConfigMirrorQuirk *mirror = quirk->data;
939     LastDataSet *last = (LastDataSet *)&mirror->data;
940 
941     last->addr = last->data = last->size = last->hits = last->added = 0;
942 
943     vfio_drop_dynamic_eventfds(vdev, quirk);
944 }
945 
946 static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
947 {
948     VFIOQuirk *quirk;
949     VFIOConfigMirrorQuirk *mirror;
950     LastDataSet *last;
951 
952     if (vdev->no_geforce_quirks ||
953         !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
954         !vfio_is_vga(vdev) || nr != 0) {
955         return;
956     }
957 
958     quirk = vfio_quirk_alloc(1);
959     quirk->reset = vfio_nvidia_bar0_quirk_reset;
960     mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
961     mirror->mem = quirk->mem;
962     mirror->vdev = vdev;
963     mirror->offset = 0x88000;
964     mirror->bar = nr;
965     last = (LastDataSet *)&mirror->data;
966     last->quirk = quirk;
967 
968     memory_region_init_io(mirror->mem, OBJECT(vdev),
969                           &vfio_nvidia_mirror_quirk, mirror,
970                           "vfio-nvidia-bar0-88000-mirror-quirk",
971                           vdev->config_size);
972     memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
973                                         mirror->offset, mirror->mem, 1);
974 
975     QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
976 
977     /* The 0x1800 offset mirror only seems to get used by legacy VGA */
978     if (vdev->vga) {
979         quirk = vfio_quirk_alloc(1);
980         quirk->reset = vfio_nvidia_bar0_quirk_reset;
981         mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
982         mirror->mem = quirk->mem;
983         mirror->vdev = vdev;
984         mirror->offset = 0x1800;
985         mirror->bar = nr;
986         last = (LastDataSet *)&mirror->data;
987         last->quirk = quirk;
988 
989         memory_region_init_io(mirror->mem, OBJECT(vdev),
990                               &vfio_nvidia_mirror_quirk, mirror,
991                               "vfio-nvidia-bar0-1800-mirror-quirk",
992                               PCI_CONFIG_SPACE_SIZE);
993         memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
994                                             mirror->offset, mirror->mem, 1);
995 
996         QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
997     }
998 
999     trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name);
1000 }
1001 
1002 /*
1003  * TODO - Some Nvidia devices provide config access to their companion HDA
1004  * device and even to their parent bridge via these config space mirrors.
1005  * Add quirks for those regions.
1006  */
1007 
/* Realtek vendor ID, defined locally — presumably absent from the included
 * PCI ID headers; verify before relying on it elsewhere. */
#define PCI_VENDOR_ID_REALTEK 0x10ec
1009 
1010 /*
1011  * RTL8168 devices have a backdoor that can access the MSI-X table.  At BAR2
1012  * offset 0x70 there is a dword data register, offset 0x74 is a dword address
1013  * register.  According to the Linux r8169 driver, the MSI-X table is addressed
1014  * when the "type" portion of the address register is set to 0x1.  This appears
1015  * to be bits 16:30.  Bit 31 is both a write indicator and some sort of
1016  * "address latched" indicator.  Bits 12:15 are a mask field, which we can
1017  * ignore because the MSI-X table should always be accessed as a dword (full
1018  * mask).  Bits 0:11 is offset within the type.
1019  *
1020  * Example trace:
1021  *
1022  * Read from MSI-X table offset 0
1023  * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
1024  * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
1025  * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
1026  *
1027  * Write 0xfee00000 to MSI-X table offset 0
1028  * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
1029  * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
1030  * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
1031  */
1032 typedef struct VFIOrtl8168Quirk {
1033     VFIOPCIDevice *vdev;
1034     uint32_t addr;
1035     uint32_t data;
1036     bool enabled;
1037 } VFIOrtl8168Quirk;
1038 
1039 static uint64_t vfio_rtl8168_quirk_address_read(void *opaque,
1040                                                 hwaddr addr, unsigned size)
1041 {
1042     VFIOrtl8168Quirk *rtl = opaque;
1043     VFIOPCIDevice *vdev = rtl->vdev;
1044     uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size);
1045 
1046     if (rtl->enabled) {
1047         data = rtl->addr ^ 0x80000000U; /* latch/complete */
1048         trace_vfio_quirk_rtl8168_fake_latch(vdev->vbasedev.name, data);
1049     }
1050 
1051     return data;
1052 }
1053 
/*
 * Writes to the backdoor address register.  Accesses targeting the MSI-X
 * table ("type" 0x1, see comment above) are virtualized: the state is
 * latched for the follow-up latch read, and write cycles are redirected to
 * the guest's emulated MSI-X table rather than touching the hardware table.
 * All other accesses pass straight through to the device.
 */
static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr,
                                             uint64_t data, unsigned size)
{
    VFIOrtl8168Quirk *rtl = opaque;
    VFIOPCIDevice *vdev = rtl->vdev;

    rtl->enabled = false;

    if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
        rtl->enabled = true;
        rtl->addr = (uint32_t)data;

        if (data & 0x80000000U) { /* Do write */
            if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
                /* Bits 0:11 are the offset within the MSI-X table. */
                hwaddr offset = data & 0xfff;
                uint64_t val = rtl->data;

                trace_vfio_quirk_rtl8168_msix_write(vdev->vbasedev.name,
                                                    (uint16_t)offset, val);

                /* Write to the proper guest MSI-X table instead */
                memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
                                             offset, val, size,
                                             MEMTXATTRS_UNSPECIFIED);
            }
            return; /* Do not write guest MSI-X data to hardware */
        }
    }

    vfio_region_write(&vdev->bars[2].region, addr + 0x74, data, size);
}
1085 
/* Address register (BAR2 + 0x74) ops; dword-only, aligned accesses. */
static const MemoryRegionOps vfio_rtl_address_quirk = {
    .read = vfio_rtl8168_quirk_address_read,
    .write = vfio_rtl8168_quirk_address_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1096 
1097 static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
1098                                              hwaddr addr, unsigned size)
1099 {
1100     VFIOrtl8168Quirk *rtl = opaque;
1101     VFIOPCIDevice *vdev = rtl->vdev;
1102     uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size);
1103 
1104     if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
1105         hwaddr offset = rtl->addr & 0xfff;
1106         memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset,
1107                                     &data, size, MEMTXATTRS_UNSPECIFIED);
1108         trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data);
1109     }
1110 
1111     return data;
1112 }
1113 
1114 static void vfio_rtl8168_quirk_data_write(void *opaque, hwaddr addr,
1115                                           uint64_t data, unsigned size)
1116 {
1117     VFIOrtl8168Quirk *rtl = opaque;
1118     VFIOPCIDevice *vdev = rtl->vdev;
1119 
1120     rtl->data = (uint32_t)data;
1121 
1122     vfio_region_write(&vdev->bars[2].region, addr + 0x70, data, size);
1123 }
1124 
/* Data register (BAR2 + 0x70) ops; dword-only, aligned accesses. */
static const MemoryRegionOps vfio_rtl_data_quirk = {
    .read = vfio_rtl8168_quirk_data_read,
    .write = vfio_rtl8168_quirk_data_write,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1135 
/*
 * Install the RTL8168 BAR2 quirk: trap the backdoor address (0x74) and
 * data (0x70) registers so MSI-X table accesses are virtualized.
 */
static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
{
    VFIOQuirk *quirk;
    VFIOrtl8168Quirk *rtl;

    /* Only applies to the RTL8168 device itself, BAR2. */
    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_REALTEK, 0x8168) || nr != 2) {
        return;
    }

    quirk = vfio_quirk_alloc(2);
    quirk->data = rtl = g_malloc0(sizeof(*rtl));
    rtl->vdev = vdev;

    memory_region_init_io(&quirk->mem[0], OBJECT(vdev),
                          &vfio_rtl_address_quirk, rtl,
                          "vfio-rtl8168-window-address-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0x74, &quirk->mem[0], 1);

    memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
                          &vfio_rtl_data_quirk, rtl,
                          "vfio-rtl8168-window-data-quirk", 4);
    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
                                        0x70, &quirk->mem[1], 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
}
1165 
1166 /*
1167  * Intel IGD support
1168  *
1169  * Obviously IGD is not a discrete device, this is evidenced not only by it
1170  * being integrated into the CPU, but by the various chipset and BIOS
1171  * dependencies that it brings along with it.  Intel is trying to move away
1172  * from this and Broadwell and newer devices can run in what Intel calls
1173  * "Universal Pass-Through" mode, or UPT.  Theoretically in UPT mode, nothing
1174  * more is required beyond assigning the IGD device to a VM.  There are
1175  * however support limitations to this mode.  It only supports IGD as a
1176  * secondary graphics device in the VM and it doesn't officially support any
1177  * physical outputs.
1178  *
1179  * The code here attempts to enable what we'll call legacy mode assignment,
1180  * IGD retains most of the capabilities we expect for it to have on bare
1181  * metal.  To enable this mode, the IGD device must be assigned to the VM
1182  * at PCI address 00:02.0, it must have a ROM, it very likely needs VGA
1183  * support, we must have VM BIOS support for reserving and populating some
1184  * of the required tables, and we need to tweak the chipset with revisions
1185  * and IDs and an LPC/ISA bridge device.  The intention is to make all of
1186  * this happen automatically by installing the device at the correct VM PCI
1187  * bus address.  If any of the conditions are not met, we cross our fingers
1188  * and hope the user knows better.
1189  *
1190  * NB - It is possible to enable physical outputs in UPT mode by supplying
1191  * an OpRegion table.  We don't do this by default because the guest driver
1192  * behaves differently if an OpRegion is provided and no monitor is attached
1193  * vs no OpRegion and a monitor being attached or not.  Effectively, if a
1194  * headless setup is desired, the OpRegion gets in the way of that.
1195  */
1196 
1197 /*
1198  * This presumes the device is already known to be an Intel VGA device, so we
1199  * take liberties in which device ID bits match which generation.  This should
1200  * not be taken as an indication that all the devices are supported, or even
1201  * supportable, some of them don't even support VT-d.
1202  * See linux:include/drm/i915_pciids.h for IDs.
1203  */
1204 static int igd_gen(VFIOPCIDevice *vdev)
1205 {
1206     if ((vdev->device_id & 0xfff) == 0xa84) {
1207         return 8; /* Broxton */
1208     }
1209 
1210     switch (vdev->device_id & 0xff00) {
1211     /* Old, untested, unavailable, unknown */
1212     case 0x0000:
1213     case 0x2500:
1214     case 0x2700:
1215     case 0x2900:
1216     case 0x2a00:
1217     case 0x2e00:
1218     case 0x3500:
1219     case 0xa000:
1220         return -1;
1221     /* SandyBridge, IvyBridge, ValleyView, Haswell */
1222     case 0x0100:
1223     case 0x0400:
1224     case 0x0a00:
1225     case 0x0c00:
1226     case 0x0d00:
1227     case 0x0f00:
1228         return 6;
1229     /* BroadWell, CherryView, SkyLake, KabyLake */
1230     case 0x1600:
1231     case 0x1900:
1232     case 0x2200:
1233     case 0x5900:
1234         return 8;
1235     }
1236 
1237     return 8; /* Assume newer is compatible */
1238 }
1239 
typedef struct VFIOIGDQuirk {
    struct VFIOPCIDevice *vdev;
    uint32_t index;     /* Last BAR4 index register write, ~0 when invalid */
    uint32_t bdsm;      /* BDSM base used for GTT translation — presumably
                         * captured at quirk setup (not shown here); verify */
} VFIOIGDQuirk;

#define IGD_GMCH 0x50 /* Graphics Control Register */
#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */
#define IGD_ASLS 0xfc /* ASL Storage Register */
1249 
1250 /*
1251  * The OpRegion includes the Video BIOS Table, which seems important for
1252  * telling the driver what sort of outputs it has.  Without this, the device
1253  * may work in the guest, but we may not get output.  This also requires BIOS
1254  * support to reserve and populate a section of guest memory sufficient for
1255  * the table and to write the base address of that memory to the ASLS register
1256  * of the IGD device.
1257  */
1258 int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
1259                                struct vfio_region_info *info, Error **errp)
1260 {
1261     int ret;
1262 
1263     vdev->igd_opregion = g_malloc0(info->size);
1264     ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
1265                 info->size, info->offset);
1266     if (ret != info->size) {
1267         error_setg(errp, "failed to read IGD OpRegion");
1268         g_free(vdev->igd_opregion);
1269         vdev->igd_opregion = NULL;
1270         return -EINVAL;
1271     }
1272 
1273     /*
1274      * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
1275      * allocate 32bit reserved memory for, copy these contents into, and write
1276      * the reserved memory base address to the device ASLS register at 0xFC.
1277      * Alignment of this reserved region seems flexible, but using a 4k page
1278      * alignment seems to work well.  This interface assumes a single IGD
1279      * device, which may be at VM address 00:02.0 in legacy mode or another
1280      * address in UPT mode.
1281      *
1282      * NB, there may be future use cases discovered where the VM should have
1283      * direct interaction with the host OpRegion, in which case the write to
1284      * the ASLS register would trigger MemoryRegion setup to enable that.
1285      */
1286     fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
1287                     vdev->igd_opregion, info->size);
1288 
1289     trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
1290 
1291     pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
1292     pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
1293     pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
1294 
1295     return 0;
1296 }
1297 
1298 /*
1299  * The rather short list of registers that we copy from the host devices.
1300  * The LPC/ISA bridge values are definitely needed to support the vBIOS, the
1301  * host bridge values may or may not be needed depending on the guest OS.
1302  * Since we're only munging revision and subsystem values on the host bridge,
1303  * we don't require our own device.  The LPC/ISA bridge needs to be our very
1304  * own though.
1305  */
/* One config space register to copy: offset within config space and width. */
typedef struct {
    uint8_t offset;
    uint8_t len;
} IGDHostInfo;

/* Registers mirrored from the host's bridge onto the VM host bridge. */
static const IGDHostInfo igd_host_bridge_infos[] = {
    {PCI_REVISION_ID,         2},
    {PCI_SUBSYSTEM_VENDOR_ID, 2},
    {PCI_SUBSYSTEM_ID,        2},
};

/* Registers mirrored onto our dummy LPC/ISA bridge at 1f.0. */
static const IGDHostInfo igd_lpc_bridge_infos[] = {
    {PCI_VENDOR_ID,           2},
    {PCI_DEVICE_ID,           2},
    {PCI_REVISION_ID,         2},
    {PCI_SUBSYSTEM_VENDOR_ID, 2},
    {PCI_SUBSYSTEM_ID,        2},
};
1324 
1325 static int vfio_pci_igd_copy(VFIOPCIDevice *vdev, PCIDevice *pdev,
1326                              struct vfio_region_info *info,
1327                              const IGDHostInfo *list, int len)
1328 {
1329     int i, ret;
1330 
1331     for (i = 0; i < len; i++) {
1332         ret = pread(vdev->vbasedev.fd, pdev->config + list[i].offset,
1333                     list[i].len, info->offset + list[i].offset);
1334         if (ret != list[i].len) {
1335             error_report("IGD copy failed: %m");
1336             return -errno;
1337         }
1338     }
1339 
1340     return 0;
1341 }
1342 
1343 /*
1344  * Stuff a few values into the host bridge.
1345  */
1346 static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev,
1347                                   struct vfio_region_info *info)
1348 {
1349     PCIBus *bus;
1350     PCIDevice *host_bridge;
1351     int ret;
1352 
1353     bus = pci_device_root_bus(&vdev->pdev);
1354     host_bridge = pci_find_device(bus, 0, PCI_DEVFN(0, 0));
1355 
1356     if (!host_bridge) {
1357         error_report("Can't find host bridge");
1358         return -ENODEV;
1359     }
1360 
1361     ret = vfio_pci_igd_copy(vdev, host_bridge, info, igd_host_bridge_infos,
1362                             ARRAY_SIZE(igd_host_bridge_infos));
1363     if (!ret) {
1364         trace_vfio_pci_igd_host_bridge_enabled(vdev->vbasedev.name);
1365     }
1366 
1367     return ret;
1368 }
1369 
1370 /*
1371  * IGD LPC/ISA bridge support code.  The vBIOS needs this, but we can't write
1372  * arbitrary values into just any bridge, so we must create our own.  We try
1373  * to handle if the user has created it for us, which they might want to do
1374  * to enable multifunction so we don't occupy the whole PCI slot.
1375  */
1376 static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp)
1377 {
1378     if (pdev->devfn != PCI_DEVFN(0x1f, 0)) {
1379         error_setg(errp, "VFIO dummy ISA/LPC bridge must have address 1f.0");
1380     }
1381 }
1382 
/* QOM class init for the dummy LPC/ISA bridge device type. */
static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "VFIO dummy ISA/LPC bridge for IGD assignment";
    dc->hotpluggable = false;    /* fixed part of the machine, not pluggable */
    k->realize = vfio_pci_igd_lpc_bridge_realize;
    k->class_id = PCI_CLASS_BRIDGE_ISA;
}
1394 
1395 static TypeInfo vfio_pci_igd_lpc_bridge_info = {
1396     .name = "vfio-pci-igd-lpc-bridge",
1397     .parent = TYPE_PCI_DEVICE,
1398     .class_init = vfio_pci_igd_lpc_bridge_class_init,
1399     .interfaces = (InterfaceInfo[]) {
1400         { INTERFACE_CONVENTIONAL_PCI_DEVICE },
1401         { },
1402     },
1403 };
1404 
/* Register the dummy LPC/ISA bridge type with QOM at module init time. */
static void vfio_pci_igd_register_types(void)
{
    type_register_static(&vfio_pci_igd_lpc_bridge_info);
}

type_init(vfio_pci_igd_register_types)
1411 
/*
 * Find (or create) the dummy LPC/ISA bridge at 00:1f.0 and copy the listed
 * config space registers from the vfio region @info into it.
 * Returns the result of vfio_pci_igd_copy().
 */
static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev,
                                 struct vfio_region_info *info)
{
    PCIDevice *lpc_bridge;
    int ret;

    /* The user may already have created one for multifunction use. */
    lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
                                 0, PCI_DEVFN(0x1f, 0));
    if (!lpc_bridge) {
        lpc_bridge = pci_create_simple(pci_device_root_bus(&vdev->pdev),
                                 PCI_DEVFN(0x1f, 0), "vfio-pci-igd-lpc-bridge");
    }

    ret = vfio_pci_igd_copy(vdev, lpc_bridge, info, igd_lpc_bridge_infos,
                            ARRAY_SIZE(igd_lpc_bridge_infos));
    if (!ret) {
        trace_vfio_pci_igd_lpc_bridge_enabled(vdev->vbasedev.name);
    }

    return ret;
}
1433 
1434 /*
1435  * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE
1436  * entry, older IGDs use 2MB and 32bit.  Each PTE maps a 4k page.  Therefore
1437  * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index
1438  * for programming the GTT.
1439  *
1440  * See linux:include/drm/i915_drm.h for shift and mask values.
1441  */
1442 static int vfio_igd_gtt_max(VFIOPCIDevice *vdev)
1443 {
1444     uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
1445     int ggms, gen = igd_gen(vdev);
1446 
1447     gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
1448     ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
1449     if (gen > 6) {
1450         ggms = 1 << ggms;
1451     }
1452 
1453     ggms *= MiB;
1454 
1455     return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8);
1456 }
1457 
1458 /*
1459  * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes.
1460  * Somehow the host stolen memory range is used for this, but how the ROM gets
1461  * it is a mystery, perhaps it's hardcoded into the ROM.  Thankfully though, it
1462  * reprograms the GTT through the IOBAR where we can trap it and transpose the
1463  * programming to the VM allocated buffer.  That buffer gets reserved by the VM
1464  * firmware via the fw_cfg entry added below.  Here we're just monitoring the
1465  * IOBAR address and data registers to detect a write sequence targeting the
1466  * GTTADR.  This code is developed by observed behavior and doesn't have a
1467  * direct spec reference, unfortunately.
1468  */
1469 static uint64_t vfio_igd_quirk_data_read(void *opaque,
1470                                          hwaddr addr, unsigned size)
1471 {
1472     VFIOIGDQuirk *igd = opaque;
1473     VFIOPCIDevice *vdev = igd->vdev;
1474 
1475     igd->index = ~0;
1476 
1477     return vfio_region_read(&vdev->bars[4].region, addr + 4, size);
1478 }
1479 
/*
 * Writes to the IOBAR data register.  GTT PTE writes (detected via the
 * latched index) are translated from the host stolen memory range to the
 * VM-allocated buffer before being forwarded to hardware.
 */
static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr,
                                      uint64_t data, unsigned size)
{
    VFIOIGDQuirk *igd = opaque;
    VFIOPCIDevice *vdev = igd->vdev;
    uint64_t val = data;
    int gen = igd_gen(vdev);

    /*
     * Programming the GGMS starts at index 0x1 and uses every 4th index (ie.
     * 0x1, 0x5, 0x9, 0xd,...).  For pre-Gen8 each 4-byte write is a whole PTE
     * entry, with 0th bit enable set.  For Gen8 and up, PTEs are 64bit, so
     * entries 0x5 & 0xd are the high dword, in our case zero.  Each PTE points
     * to a 4k page, which we translate to a page from the VM allocated region,
     * pointed to by the BDSM register.  If this is not set, we fail.
     *
     * We trap writes to the full configured GTT size, but we typically only
     * see the vBIOS writing up to (nearly) the 1MB barrier.  In fact it often
     * seems to miss the last entry for an even 1MB GTT.  Doing a gratuitous
     * write of that last entry does work, but is hopefully unnecessary since
     * we clear the previous GTT on initialization.
     */
    if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) {
        if (gen < 8 || (igd->index % 8 == 1)) {
            uint32_t base;

            base = pci_get_long(vdev->pdev.config + IGD_BDSM);
            if (!base) {
                hw_error("vfio-igd: Guest attempted to program IGD GTT before "
                         "BIOS reserved stolen memory.  Unsupported BIOS?");
            }

            /* Rebase the PTE from the guest BDSM range to the real one. */
            val = data - igd->bdsm + base;
        } else {
            val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */
        }

        trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name,
                                      igd->index, data, val);
    }

    vfio_region_write(&vdev->bars[4].region, addr + 4, val, size);

    /* A data write consumes the latched index. */
    igd->index = ~0;
}
1525 
/* IOBAR data register ops (offset 4): translate GTT PTE writes. */
static const MemoryRegionOps vfio_igd_data_quirk = {
    .read = vfio_igd_quirk_data_read,
    .write = vfio_igd_quirk_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1531 
1532 static uint64_t vfio_igd_quirk_index_read(void *opaque,
1533                                           hwaddr addr, unsigned size)
1534 {
1535     VFIOIGDQuirk *igd = opaque;
1536     VFIOPCIDevice *vdev = igd->vdev;
1537 
1538     igd->index = ~0;
1539 
1540     return vfio_region_read(&vdev->bars[4].region, addr, size);
1541 }
1542 
1543 static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr,
1544                                        uint64_t data, unsigned size)
1545 {
1546     VFIOIGDQuirk *igd = opaque;
1547     VFIOPCIDevice *vdev = igd->vdev;
1548 
1549     igd->index = data;
1550 
1551     vfio_region_write(&vdev->bars[4].region, addr, data, size);
1552 }
1553 
/* IOBAR index register ops (offset 0): latch the index for the data ops. */
static const MemoryRegionOps vfio_igd_index_quirk = {
    .read = vfio_igd_quirk_index_read,
    .write = vfio_igd_quirk_index_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1559 
1560 static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
1561 {
1562     struct vfio_region_info *rom = NULL, *opregion = NULL,
1563                             *host = NULL, *lpc = NULL;
1564     VFIOQuirk *quirk;
1565     VFIOIGDQuirk *igd;
1566     PCIDevice *lpc_bridge;
1567     int i, ret, ggms_mb, gms_mb = 0, gen;
1568     uint64_t *bdsm_size;
1569     uint32_t gmch;
1570     uint16_t cmd_orig, cmd;
1571     Error *err = NULL;
1572 
1573     /*
1574      * This must be an Intel VGA device at address 00:02.0 for us to even
1575      * consider enabling legacy mode.  The vBIOS has dependencies on the
1576      * PCI bus address.
1577      */
1578     if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
1579         !vfio_is_vga(vdev) || nr != 4 ||
1580         &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
1581                                        0, PCI_DEVFN(0x2, 0))) {
1582         return;
1583     }
1584 
1585     /*
1586      * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
1587      * can stuff host values into, so if there's already one there and it's not
1588      * one we can hack on, legacy mode is no-go.  Sorry Q35.
1589      */
1590     lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
1591                                  0, PCI_DEVFN(0x1f, 0));
1592     if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
1593                                            "vfio-pci-igd-lpc-bridge")) {
1594         error_report("IGD device %s cannot support legacy mode due to existing "
1595                      "devices at address 1f.0", vdev->vbasedev.name);
1596         return;
1597     }
1598 
1599     /*
1600      * IGD is not a standard, they like to change their specs often.  We
1601      * only attempt to support back to SandBridge and we hope that newer
1602      * devices maintain compatibility with generation 8.
1603      */
1604     gen = igd_gen(vdev);
1605     if (gen != 6 && gen != 8) {
1606         error_report("IGD device %s is unsupported in legacy mode, "
1607                      "try SandyBridge or newer", vdev->vbasedev.name);
1608         return;
1609     }
1610 
1611     /*
1612      * Most of what we're doing here is to enable the ROM to run, so if
1613      * there's no ROM, there's no point in setting up this quirk.
1614      * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
1615      */
1616     ret = vfio_get_region_info(&vdev->vbasedev,
1617                                VFIO_PCI_ROM_REGION_INDEX, &rom);
1618     if ((ret || !rom->size) && !vdev->pdev.romfile) {
1619         error_report("IGD device %s has no ROM, legacy mode disabled",
1620                      vdev->vbasedev.name);
1621         goto out;
1622     }
1623 
1624     /*
1625      * Ignore the hotplug corner case, mark the ROM failed, we can't
1626      * create the devices we need for legacy mode in the hotplug scenario.
1627      */
1628     if (vdev->pdev.qdev.hotplugged) {
1629         error_report("IGD device %s hotplugged, ROM disabled, "
1630                      "legacy mode disabled", vdev->vbasedev.name);
1631         vdev->rom_read_failed = true;
1632         goto out;
1633     }
1634 
1635     /*
1636      * Check whether we have all the vfio device specific regions to
1637      * support legacy mode (added in Linux v4.6).  If not, bail.
1638      */
1639     ret = vfio_get_dev_region_info(&vdev->vbasedev,
1640                         VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
1641                         VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
1642     if (ret) {
1643         error_report("IGD device %s does not support OpRegion access,"
1644                      "legacy mode disabled", vdev->vbasedev.name);
1645         goto out;
1646     }
1647 
1648     ret = vfio_get_dev_region_info(&vdev->vbasedev,
1649                         VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
1650                         VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
1651     if (ret) {
1652         error_report("IGD device %s does not support host bridge access,"
1653                      "legacy mode disabled", vdev->vbasedev.name);
1654         goto out;
1655     }
1656 
1657     ret = vfio_get_dev_region_info(&vdev->vbasedev,
1658                         VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
1659                         VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
1660     if (ret) {
1661         error_report("IGD device %s does not support LPC bridge access,"
1662                      "legacy mode disabled", vdev->vbasedev.name);
1663         goto out;
1664     }
1665 
1666     gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
1667 
1668     /*
1669      * If IGD VGA Disable is clear (expected) and VGA is not already enabled,
1670      * try to enable it.  Probably shouldn't be using legacy mode without VGA,
1671      * but also no point in us enabling VGA if disabled in hardware.
1672      */
1673     if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) {
1674         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
1675         error_report("IGD device %s failed to enable VGA access, "
1676                      "legacy mode disabled", vdev->vbasedev.name);
1677         goto out;
1678     }
1679 
1680     /* Create our LPC/ISA bridge */
1681     ret = vfio_pci_igd_lpc_init(vdev, lpc);
1682     if (ret) {
1683         error_report("IGD device %s failed to create LPC bridge, "
1684                      "legacy mode disabled", vdev->vbasedev.name);
1685         goto out;
1686     }
1687 
1688     /* Stuff some host values into the VM PCI host bridge */
1689     ret = vfio_pci_igd_host_init(vdev, host);
1690     if (ret) {
1691         error_report("IGD device %s failed to modify host bridge, "
1692                      "legacy mode disabled", vdev->vbasedev.name);
1693         goto out;
1694     }
1695 
1696     /* Setup OpRegion access */
1697     ret = vfio_pci_igd_opregion_init(vdev, opregion, &err);
1698     if (ret) {
1699         error_append_hint(&err, "IGD legacy mode disabled\n");
1700         error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
1701         goto out;
1702     }
1703 
1704     /* Setup our quirk to munge GTT addresses to the VM allocated buffer */
1705     quirk = vfio_quirk_alloc(2);
1706     igd = quirk->data = g_malloc0(sizeof(*igd));
1707     igd->vdev = vdev;
1708     igd->index = ~0;
1709     igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
1710     igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */
1711 
1712     memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
1713                           igd, "vfio-igd-index-quirk", 4);
1714     memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
1715                                         0, &quirk->mem[0], 1);
1716 
1717     memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk,
1718                           igd, "vfio-igd-data-quirk", 4);
1719     memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
1720                                         4, &quirk->mem[1], 1);
1721 
1722     QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
1723 
1724     /* Determine the size of stolen memory needed for GTT */
1725     ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
1726     if (gen > 6) {
1727         ggms_mb = 1 << ggms_mb;
1728     }
1729 
1730     /*
1731      * Assume we have no GMS memory, but allow it to be overrided by device
1732      * option (experimental).  The spec doesn't actually allow zero GMS when
1733      * when IVD (IGD VGA Disable) is clear, but the claim is that it's unused,
1734      * so let's not waste VM memory for it.
1735      */
1736     gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
1737 
1738     if (vdev->igd_gms) {
1739         if (vdev->igd_gms <= 0x10) {
1740             gms_mb = vdev->igd_gms * 32;
1741             gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8);
1742         } else {
1743             error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms);
1744             vdev->igd_gms = 0;
1745         }
1746     }
1747 
1748     /*
1749      * Request reserved memory for stolen memory via fw_cfg.  VM firmware
1750      * must allocate a 1MB aligned reserved memory region below 4GB with
1751      * the requested size (in bytes) for use by the Intel PCI class VGA
1752      * device at VM address 00:02.0.  The base address of this reserved
1753      * memory region must be written to the device BDSM regsiter at PCI
1754      * config offset 0x5C.
1755      */
1756     bdsm_size = g_malloc(sizeof(*bdsm_size));
1757     *bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB);
1758     fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
1759                     bdsm_size, sizeof(*bdsm_size));
1760 
1761     /* GMCH is read-only, emulated */
1762     pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
1763     pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
1764     pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
1765 
1766     /* BDSM is read-write, emulated.  The BIOS needs to be able to write it */
1767     pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
1768     pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
1769     pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0);
1770 
1771     /*
1772      * This IOBAR gives us access to GTTADR, which allows us to write to
1773      * the GTT itself.  So let's go ahead and write zero to all the GTT
1774      * entries to avoid spurious DMA faults.  Be sure I/O access is enabled
1775      * before talking to the device.
1776      */
1777     if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
1778               vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
1779         error_report("IGD device %s - failed to read PCI command register",
1780                      vdev->vbasedev.name);
1781     }
1782 
1783     cmd = cmd_orig | PCI_COMMAND_IO;
1784 
1785     if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd),
1786                vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) {
1787         error_report("IGD device %s - failed to write PCI command register",
1788                      vdev->vbasedev.name);
1789     }
1790 
1791     for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) {
1792         vfio_region_write(&vdev->bars[4].region, 0, i, 4);
1793         vfio_region_write(&vdev->bars[4].region, 4, 0, 4);
1794     }
1795 
1796     if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
1797                vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
1798         error_report("IGD device %s - failed to restore PCI command register",
1799                      vdev->vbasedev.name);
1800     }
1801 
1802     trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
1803 
1804 out:
1805     g_free(rom);
1806     g_free(opregion);
1807     g_free(host);
1808     g_free(lpc);
1809 }
1810 
1811 /*
1812  * Common quirk probe entry points.
1813  */
/*
 * Probe the device's VGA ranges for quirks that must be layered on top
 * (currently the ATI 3C3 and NVIDIA 3D0 mirrors).  Each probe is expected
 * to validate vendor/device applicability itself and silently return when
 * not applicable — presumably, as with the BAR probes in this file.
 */
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}
1819 
1820 void vfio_vga_quirk_exit(VFIOPCIDevice *vdev)
1821 {
1822     VFIOQuirk *quirk;
1823     int i, j;
1824 
1825     for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1826         QLIST_FOREACH(quirk, &vdev->vga->region[i].quirks, next) {
1827             for (j = 0; j < quirk->nr_mem; j++) {
1828                 memory_region_del_subregion(&vdev->vga->region[i].mem,
1829                                             &quirk->mem[j]);
1830             }
1831         }
1832     }
1833 }
1834 
1835 void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev)
1836 {
1837     int i, j;
1838 
1839     for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1840         while (!QLIST_EMPTY(&vdev->vga->region[i].quirks)) {
1841             VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga->region[i].quirks);
1842             QLIST_REMOVE(quirk, next);
1843             for (j = 0; j < quirk->nr_mem; j++) {
1844                 object_unparent(OBJECT(&quirk->mem[j]));
1845             }
1846             g_free(quirk->mem);
1847             g_free(quirk->data);
1848             g_free(quirk);
1849         }
1850     }
1851 }
1852 
/*
 * Probe BAR @nr for device-specific quirks.  Each vfio_probe_*() helper
 * checks vendor/device/BAR applicability itself and is a no-op otherwise
 * (see e.g. vfio_probe_igd_bar4_quirk above), so it is safe to call all
 * of them for every BAR.
 */
void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
{
    vfio_probe_ati_bar4_quirk(vdev, nr);
    vfio_probe_ati_bar2_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_quirk(vdev, nr);
    vfio_probe_rtl8168_bar2_quirk(vdev, nr);
    vfio_probe_igd_bar4_quirk(vdev, nr);
}
1862 
1863 void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
1864 {
1865     VFIOBAR *bar = &vdev->bars[nr];
1866     VFIOQuirk *quirk;
1867     int i;
1868 
1869     QLIST_FOREACH(quirk, &bar->quirks, next) {
1870         while (!QLIST_EMPTY(&quirk->ioeventfds)) {
1871             vfio_ioeventfd_exit(vdev, QLIST_FIRST(&quirk->ioeventfds));
1872         }
1873 
1874         for (i = 0; i < quirk->nr_mem; i++) {
1875             memory_region_del_subregion(bar->region.mem, &quirk->mem[i]);
1876         }
1877     }
1878 }
1879 
1880 void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr)
1881 {
1882     VFIOBAR *bar = &vdev->bars[nr];
1883     int i;
1884 
1885     while (!QLIST_EMPTY(&bar->quirks)) {
1886         VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
1887         QLIST_REMOVE(quirk, next);
1888         for (i = 0; i < quirk->nr_mem; i++) {
1889             object_unparent(OBJECT(&quirk->mem[i]));
1890         }
1891         g_free(quirk->mem);
1892         g_free(quirk->data);
1893         g_free(quirk);
1894     }
1895 }
1896 
1897 /*
1898  * Reset quirks
1899  */
1900 void vfio_quirk_reset(VFIOPCIDevice *vdev)
1901 {
1902     int i;
1903 
1904     for (i = 0; i < PCI_ROM_SLOT; i++) {
1905         VFIOQuirk *quirk;
1906         VFIOBAR *bar = &vdev->bars[i];
1907 
1908         QLIST_FOREACH(quirk, &bar->quirks, next) {
1909             if (quirk->reset) {
1910                 quirk->reset(vdev, quirk);
1911             }
1912         }
1913     }
1914 }
1915 
1916 /*
1917  * AMD Radeon PCI config reset, based on Linux:
1918  *   drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running()
1919  *   drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset
1920  *   drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc()
1921  *   drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock()
1922  * IDs: include/drm/drm_pciids.h
1923  * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0
1924  *
1925  * Bonaire and Hawaii GPUs do not respond to a bus reset.  This is a bug in the
1926  * hardware that should be fixed on future ASICs.  The symptom of this is that
 * once the accelerated driver loads, Windows guests will BSOD on subsequent
 * attempts to load the driver, such as after VM reset or shutdown/restart.  To
1929  * work around this, we do an AMD specific PCI config reset, followed by an SMC
1930  * reset.  The PCI config reset only works if SMC firmware is running, so we
1931  * have a dependency on the state of the device as to whether this reset will
1932  * be effective.  There are still cases where we won't be able to kick the
1933  * device into working, but this greatly improves the usability overall.  The
1934  * config reset magic is relatively common on AMD GPUs, but the setup and SMC
1935  * poking is largely ASIC specific.
1936  */
1937 static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev)
1938 {
1939     uint32_t clk, pc_c;
1940 
1941     /*
1942      * Registers 200h and 204h are index and data registers for accessing
1943      * indirect configuration registers within the device.
1944      */
1945     vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
1946     clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
1947     vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4);
1948     pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
1949 
1950     return (!(clk & 1) && (0x20100 <= pc_c));
1951 }
1952 
1953 /*
1954  * The scope of a config reset is controlled by a mode bit in the misc register
1955  * and a fuse, exposed as a bit in another register.  The fuse is the default
 * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula
1957  * scope = !(misc ^ fuse), where the resulting scope is defined the same as
1958  * the fuse.  A truth table therefore tells us that if misc == fuse, we need
1959  * to flip the value of the bit in the misc register.
1960  */
1961 static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev)
1962 {
1963     uint32_t misc, fuse;
1964     bool a, b;
1965 
1966     vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4);
1967     fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
1968     b = fuse & 64;
1969 
1970     vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4);
1971     misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
1972     a = misc & 2;
1973 
1974     if (a == b) {
1975         vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4);
1976         vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */
1977     }
1978 }
1979 
/*
 * Device-specific reset for Bonaire/Hawaii GPUs (see the comment block
 * above): an AMD PCI config reset via register 0x7c, which only works
 * while the SMC firmware is running, followed by an SMC reset and clock
 * stop.  Returns 0 on success or a negative errno; -ENODEV means a
 * kernel-provided reset exists and should be used instead.
 */
static int vfio_radeon_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    int i, ret = 0;
    uint32_t data;

    /* Defer to a kernel implemented reset */
    if (vdev->vbasedev.reset_works) {
        trace_vfio_quirk_ati_bonaire_reset_skipped(vdev->vbasedev.name);
        return -ENODEV;
    }

    /* Enable only memory BAR access */
    vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2);

    /* Reset only works if SMC firmware is loaded and running */
    if (!vfio_radeon_smc_is_running(vdev)) {
        ret = -EINVAL;
        trace_vfio_quirk_ati_bonaire_reset_no_smc(vdev->vbasedev.name);
        goto out;
    }

    /* Make sure only the GFX function is reset */
    vfio_radeon_set_gfx_only_reset(vdev);

    /* AMD PCI config reset */
    vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4);
    usleep(100);

    /* Read back the memory size to make sure we're out of reset */
    for (i = 0; i < 100000; i++) {
        if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) {
            goto reset_smc;
        }
        usleep(1);
    }

    /* Timed out; proceed to the SMC reset anyway, best effort. */
    trace_vfio_quirk_ati_bonaire_reset_timeout(vdev->vbasedev.name);

reset_smc:
    /* Reset SMC: set bit 0 of indirect register 0x80000000 */
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4);
    data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    data |= 1;
    vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);

    /* Disable SMC clock */
    vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
    data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
    data |= 1;
    vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);

    trace_vfio_quirk_ati_bonaire_reset_done(vdev->vbasedev.name);

out:
    /* Restore PCI command register */
    vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2);

    return ret;
}
2040 
2041 void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev)
2042 {
2043     switch (vdev->vendor_id) {
2044     case 0x1002:
2045         switch (vdev->device_id) {
2046         /* Bonaire */
2047         case 0x6649: /* Bonaire [FirePro W5100] */
2048         case 0x6650:
2049         case 0x6651:
2050         case 0x6658: /* Bonaire XTX [Radeon R7 260X] */
2051         case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */
2052         case 0x665d: /* Bonaire [Radeon R7 200 Series] */
2053         /* Hawaii */
2054         case 0x67A0: /* Hawaii XT GL [FirePro W9100] */
2055         case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */
2056         case 0x67A2:
2057         case 0x67A8:
2058         case 0x67A9:
2059         case 0x67AA:
2060         case 0x67B0: /* Hawaii XT [Radeon R9 290X] */
2061         case 0x67B1: /* Hawaii PRO [Radeon R9 290] */
2062         case 0x67B8:
2063         case 0x67B9:
2064         case 0x67BA:
2065         case 0x67BE:
2066             vdev->resetfn = vfio_radeon_reset;
2067             trace_vfio_quirk_ati_bonaire_reset(vdev->vbasedev.name);
2068             break;
2069         }
2070         break;
2071     }
2072 }
2073 
2074 /*
2075  * The NVIDIA GPUDirect P2P Vendor capability allows the user to specify
2076  * devices as a member of a clique.  Devices within the same clique ID
2077  * are capable of direct P2P.  It's the user's responsibility that this
2078  * is correct.  The spec says that this may reside at any unused config
2079  * offset, but reserves and recommends hypervisors place this at C8h.
2080  * The spec also states that the hypervisor should place this capability
2081  * at the end of the capability list, thus next is defined as 0h.
2082  *
2083  * +----------------+----------------+----------------+----------------+
2084  * | sig 7:0 ('P')  |  vndr len (8h) |    next (0h)   |   cap id (9h)  |
2085  * +----------------+----------------+----------------+----------------+
2086  * | rsvd 15:7(0h),id 6:3,ver 2:0(0h)|          sig 23:8 ('P2')        |
2087  * +---------------------------------+---------------------------------+
2088  *
2089  * https://lists.gnu.org/archive/html/qemu-devel/2017-08/pdfUda5iEpgOS.pdf
2090  */
2091 static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v,
2092                                        const char *name, void *opaque,
2093                                        Error **errp)
2094 {
2095     DeviceState *dev = DEVICE(obj);
2096     Property *prop = opaque;
2097     uint8_t *ptr = qdev_get_prop_ptr(dev, prop);
2098 
2099     visit_type_uint8(v, name, ptr, errp);
2100 }
2101 
2102 static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v,
2103                                        const char *name, void *opaque,
2104                                        Error **errp)
2105 {
2106     DeviceState *dev = DEVICE(obj);
2107     Property *prop = opaque;
2108     uint8_t value, *ptr = qdev_get_prop_ptr(dev, prop);
2109     Error *local_err = NULL;
2110 
2111     if (dev->realized) {
2112         qdev_prop_set_after_realize(dev, name, errp);
2113         return;
2114     }
2115 
2116     visit_type_uint8(v, name, &value, &local_err);
2117     if (local_err) {
2118         error_propagate(errp, local_err);
2119         return;
2120     }
2121 
2122     if (value & ~0xF) {
2123         error_setg(errp, "Property %s: valid range 0-15", name);
2124         return;
2125     }
2126 
2127     *ptr = value;
2128 }
2129 
/*
 * Property type for the NVIDIA GPUDirect clique ID: a 4-bit unsigned
 * value (0-15), validated by set_nv_gpudirect_clique_id() above and
 * consumed by vfio_add_nv_gpudirect_cap() below.
 */
const PropertyInfo qdev_prop_nv_gpudirect_clique = {
    .name = "uint4",
    .description = "NVIDIA GPUDirect Clique ID (0 - 15)",
    .get = get_nv_gpudirect_clique_id,
    .set = set_nv_gpudirect_clique_id,
};
2136 
/*
 * Synthesize the NVIDIA GPUDirect P2P vendor capability described in the
 * comment block above at config offset C8h.  Only valid for NVIDIA
 * display-class devices; a clique value of 0xFF means the property was
 * never set and the capability is omitted.  Returns 0 on success or a
 * negative errno with @errp set.
 */
static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    int ret, pos = 0xC8;  /* spec-recommended offset for this cap */

    /* 0xFF = property unset, nothing to add. */
    if (vdev->nv_gpudirect_clique == 0xFF) {
        return 0;
    }

    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid device vendor");
        return -EINVAL;
    }

    /* High byte of the class word is the base class. */
    if (pci_get_byte(pdev->config + PCI_CLASS_DEVICE + 1) !=
        PCI_BASE_CLASS_DISPLAY) {
        error_setg(errp, "NVIDIA GPUDirect Clique ID: unsupported PCI class");
        return -EINVAL;
    }

    ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp);
    if (ret < 0) {
        error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: ");
        return ret;
    }

    /* The whole 8-byte cap is emulated; the device never sees it. */
    memset(vdev->emulated_config_bits + pos, 0xFF, 8);
    pos += PCI_CAP_FLAGS;
    pci_set_byte(pdev->config + pos++, 8);   /* vendor cap length */
    pci_set_byte(pdev->config + pos++, 'P'); /* signature bits 7:0 */
    pci_set_byte(pdev->config + pos++, '2'); /* signature bits 15:8 */
    pci_set_byte(pdev->config + pos++, 'P'); /* signature bits 23:16 */
    pci_set_byte(pdev->config + pos++, vdev->nv_gpudirect_clique << 3);
    pci_set_byte(pdev->config + pos, 0);     /* ver 0, reserved */

    return 0;
}
2174 
2175 int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
2176 {
2177     int ret;
2178 
2179     ret = vfio_add_nv_gpudirect_cap(vdev, errp);
2180     if (ret) {
2181         return ret;
2182     }
2183 
2184     return 0;
2185 }
2186 
/*
 * Read-only property getter: the NVLink2 target address is carried in the
 * opaque pointer itself (stashed at object_property_add() time below).
 */
static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    uint64_t tgt = (uintptr_t) opaque;
    visit_type_uint64(v, name, &tgt, errp);
}
2194 
/*
 * Read-only property getter: the NVLink2 link speed is carried in the
 * opaque pointer itself (stashed at object_property_add() time below).
 */
static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v,
                                                 const char *name,
                                                 void *opaque, Error **errp)
{
    uint32_t link_speed = (uint32_t)(uintptr_t) opaque;
    visit_type_uint32(v, name, &link_speed, errp);
}
2202 
/*
 * Set up the NVIDIA V100 NVLink2 GPU RAM: find the vendor-specific vfio
 * region, mmap it, wrap it in a RAM memory region hung off BAR0's quirk
 * list, and publish the capability's target address as the "nvlink2-tgt"
 * property for platform code to consume (not visible in this file).
 * Returns 0 on success or a negative errno.  @errp is currently unused.
 */
int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
{
    int ret;
    void *p;
    struct vfio_region_info *nv2reg = NULL;
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_nvlink2_ssatgt *cap;
    VFIOQuirk *quirk;

    ret = vfio_get_dev_region_info(&vdev->vbasedev,
                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
                                   PCI_VENDOR_ID_NVIDIA,
                                   VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
                                   &nv2reg);
    if (ret) {
        /* Region absent: not an NVLink2 GPU, nothing to do. */
        return ret;
    }

    /* The region must advertise its system address target (SSATGT). */
    hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
    if (!hdr) {
        ret = -ENODEV;
        goto free_exit;
    }
    cap = (void *) hdr;

    p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
             MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
    if (p == MAP_FAILED) {
        ret = -errno;
        goto free_exit;
    }

    /* One memory region wrapping the whole mapped GPU RAM. */
    quirk = vfio_quirk_alloc(1);
    memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
                               nv2reg->size, p);
    QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);

    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
                        (void *) (uintptr_t) cap->tgt, NULL);
    trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
                                          nv2reg->size);
free_exit:
    g_free(nv2reg);

    return ret;
}
2250 
/*
 * Set up the IBM NVLink2 bridge ATSD (Address Translation Shootdown)
 * support: find the vendor-specific vfio region, mmap it (when the bridge
 * has an ATSD assigned) into a RAM-device region on BAR0's quirk list,
 * and publish the capability's target address and link speed as the
 * "nvlink2-tgt" / "nvlink2-link-speed" properties for platform code to
 * consume (not visible in this file).  Returns 0 on success or a negative
 * errno.  @errp is currently unused.
 */
int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
{
    int ret;
    void *p;
    struct vfio_region_info *atsdreg = NULL;
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
    struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
    VFIOQuirk *quirk;

    ret = vfio_get_dev_region_info(&vdev->vbasedev,
                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
                                   PCI_VENDOR_ID_IBM,
                                   VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
                                   &atsdreg);
    if (ret) {
        /* Region absent: not an NVLink2 bridge, nothing to do. */
        return ret;
    }

    /* Both the target-address and link-speed capabilities are required. */
    hdr = vfio_get_region_info_cap(atsdreg,
                                   VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
    if (!hdr) {
        ret = -ENODEV;
        goto free_exit;
    }
    captgt = (void *) hdr;

    hdr = vfio_get_region_info_cap(atsdreg,
                                   VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
    if (!hdr) {
        ret = -ENODEV;
        goto free_exit;
    }
    capspeed = (void *) hdr;

    /* Some NVLink bridges may not have assigned ATSD */
    if (atsdreg->size) {
        p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
        if (p == MAP_FAILED) {
            ret = -errno;
            goto free_exit;
        }

        quirk = vfio_quirk_alloc(1);
        memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
                                          "nvlink2-atsd-mr", atsdreg->size, p);
        QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
    }

    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
                        (void *) (uintptr_t) captgt->tgt, NULL);
    trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
                                              atsdreg->size);

    object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32",
                        vfio_pci_nvlink2_get_link_speed, NULL, NULL,
                        (void *) (uintptr_t) capspeed->link_speed, NULL);
    trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
                                              capspeed->link_speed);
free_exit:
    g_free(atsdreg);

    return ret;
}
2317