xref: /qemu/hw/virtio/virtio-iommu.c (revision c4b8ffcb)
/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

#include "standard-headers/linux/virtio_ids.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"

/* Maximum size of the request and event virtqueues */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
/* Size of the buffer reserved for the properties of a PROBE request */
#define VIOMMU_PROBE_SIZE 512

typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    bool bypass;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

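/*
 * An inclusive [low, high] range of guest-virtual addresses; used as the
 * key of the per-domain mappings GTree.
 */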
typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

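/*
 * Endpoint IDs are PCI requester IDs: the bus number in bits [15:8] and
 * devfn in bits [7:0].
 */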
static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

/**
 * The bus number is used for lookup when SID-based operations occur.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
 * numbers may not yet be initialized.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

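/*
 * GTree comparator for VirtIOIOMMUInterval keys: two intervals compare equal
 * as soon as they overlap, so a lookup with any interval contained in a
 * mapping returns that mapping.
 */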
static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}

static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.addr_mask = virt_end - virt_start;
    event.entry.iova = virt_start;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    memory_region_notify_iommu(mr, 0, event);
}

static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;
    uint64_t delta = virt_end - virt_start;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;
    event.entry.addr_mask = delta;
    event.entry.iova = virt_start;

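    /*
     * A range covering the whole 64-bit address space (delta == UINT64_MAX)
     * cannot be split by the loop below, since virt_end + 1 wraps around to
     * virt_start and the loop would never run; notify it as a single event.
     */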
    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, event);
    }

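    /*
     * Split the range into naturally aligned power-of-two chunks, since
     * notifier consumers expect addr_mask to describe such a region and an
     * arbitrary [virt_start, virt_end] range need not be one.
     */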
    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event.entry.addr_mask = mask;
        event.entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, event);
        virt_start += mask + 1;
    }
}

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

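/*
 * Unlink an endpoint from its domain, notifying UNMAP for every mapping of
 * the domain so that the endpoint's IOMMU memory region is cleaned up.
 */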
static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
}

static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id,
                                                  bool bypass)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        if (domain->bypass != bypass) {
            return NULL;
        }
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                   NULL, (GDestroyNotify)g_free,
                                   (GDestroyNotify)g_free);
    domain->bypass = bypass;
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}

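/*
 * PCI IOMMU hook, registered with pci_setup_iommu() at realize time: lazily
 * allocate one IOMMUDevice, IOMMU memory region and address space per
 * (bus, devfn), so that each endpoint gets its own translation context.
 */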
static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_new0(IOMMUDevice, 1);

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
        g_free(name);
    }
    return &sdev->as;
}

static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_attach(domain_id, ep_id);

    if (flags & ~VIRTIO_IOMMU_ATTACH_F_BYPASS) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * the device is already attached to a domain,
         * detach it first
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id,
                                     flags & VIRTIO_IOMMU_ATTACH_F_BYPASS);
    if (!domain) {
        /* Incompatible bypass flag */
        return VIRTIO_IOMMU_S_INVAL;
    }
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval.low = virt_start;
    interval.high = virt_end;

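    /*
     * Each removed mapping must be fully contained within the requested
     * range: the device never splits a mapping, so a partial overlap aborts
     * the whole operation with VIRTIO_IOMMU_S_RANGE.
     */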
    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}

static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    int i;

    total = size * s->nb_reserved_regions;

    if (total > free) {
        return -ENOSPC;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        unsigned subtype = s->reserved_regions[i].type;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(s->reserved_regions[i].low);
        prop.end = cpu_to_le64(s->reserved_regions[i].high);

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    size_t free = VIOMMU_PROBE_SIZE;
    ssize_t count;

    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t req_sz)
{
    size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);

    sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}

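/*
 * Generate one request handler per request type: copy the fixed-size payload
 * (everything but the tail) out of the guest iovec, then dispatch to the
 * matching virtio_iommu_<req>() implementation above.
 */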
#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}

static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    size_t output_size = sizeof(tail), sz;
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
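            /*
             * Unlike the other requests, a PROBE response carries a
             * variable-size property buffer ahead of the tail: allocate
             * probe_size + tail and write the status at the very end.
             */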
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = (struct virtio_iommu_req_tail *)
                        (buf + s->config.probe_size);
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_mutex_unlock(&s->mutex);

out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        buf = NULL;
    }
}

static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}

static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    bool found;
    int i;

    interval.low = addr;
    interval.high = addr + 1;
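    /*
     * interval_cmp() treats overlapping intervals as equal, so this
     * two-byte interval is enough to find the mapping containing addr.
     */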

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = s->config.bypass;

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        ReservedRegion *reg = &s->reserved_regions[i];

        if (addr >= reg->low && addr <= reg->high) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    } else if (ep->domain->bypass) {
        entry.perm = flag;
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

unlock:
    qemu_mutex_unlock(&s->mutex);
    return entry;
}

static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    struct virtio_iommu_config *out_config = (void *)config_data;

    out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask);
    out_config->input_range.start = cpu_to_le64(dev_config->input_range.start);
    out_config->input_range.end = cpu_to_le64(dev_config->input_range.end);
    out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start);
    out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end);
    out_config->probe_size = cpu_to_le32(dev_config->probe_size);
    out_config->bypass = dev_config->bypass;

    trace_virtio_iommu_get_config(dev_config->page_size_mask,
                                  dev_config->input_range.start,
                                  dev_config->input_range.end,
                                  dev_config->domain_range.start,
                                  dev_config->domain_range.end,
                                  dev_config->probe_size,
                                  dev_config->bypass);
}

static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    const struct virtio_iommu_config *in_config = (void *)config_data;

    if (in_config->bypass != dev_config->bypass) {
        if (!virtio_vdev_has_feature(vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
            virtio_error(vdev, "cannot set config.bypass");
            return;
        } else if (in_config->bypass != 0 && in_config->bypass != 1) {
            virtio_error(vdev, "invalid config.bypass value '%u'",
                         in_config->bypass);
            return;
        }
        dev_config->bypass = in_config->bypass;
    }

    trace_virtio_iommu_set_config(in_config->bypass);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high,
                             mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

unlock:
    qemu_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
 * for example 0xfffffffffffff000. When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this restriction
 * to the mask.
 */
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                   " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
        return -1;
    }

    /*
     * After the machine is finalized, we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it. Having different masks is possible but the guest will use
     * sub-optimal block sizes, so warn about it.
     */
    if (phase_check(PHASE_MACHINE_READY)) {
        int new_granule = ctz64(new_mask);
        int cur_granule = ctz64(cur_mask);

        if (new_granule != cur_granule) {
            error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                       " is incompatible with mask 0x%"PRIx64, cur_mask,
                       new_mask);
            return -1;
        } else if (new_mask != cur_mask) {
            warn_report("virtio-iommu page mask 0x%"PRIx64
                        " does not match 0x%"PRIx64, cur_mask, new_mask);
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

static void virtio_iommu_system_reset(void *opaque)
{
    VirtIOIOMMU *s = opaque;

    trace_virtio_iommu_system_reset();

    /*
     * config.bypass is sticky across device reset, but should be restored on
     * system reset
     */
    s->config.bypass = s->boot_bypass;
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config));

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    s->config.page_size_mask = TARGET_PAGE_MASK;
    s->config.input_range.end = UINT64_MAX;
    s->config.domain_range.end = UINT32_MAX;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);

    qemu_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }

    qemu_register_reset(virtio_iommu_system_reset, s);
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    qemu_unregister_reset(virtio_iommu_system_reset, s);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (VMStateField[]) {                       \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (VMStateField[]) {                      \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

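/*
 * pre_load hook: (re)allocate the empty mappings GTree that VMSTATE_GTREE_V
 * repopulates during migration.
 */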
static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_load = domain_preload,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_BOOL_V(bypass, VirtIOIOMMUDomain, 2),
        VMSTATE_END_OF_LIST()
    }
};

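/*
 * Only endpoint IDs are migrated (see vmstate_endpoint): after load, rebuild
 * each endpoint's domain and iommu_mr pointers and re-insert it into the
 * endpoint tree.
 */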
static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);
    return 0;
}

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 2,
    .version_id = 2,
    .post_load = iommu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 2,
    .priority = MIG_PRI_IOMMU,
    .version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
    DEFINE_PROP_END_OF_LIST(),
};
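
/*
 * Illustrative usage (a sketch, not taken from this file): machines normally
 * wire up the "primary-bus" link themselves when the PCI proxy device is
 * used, so something like
 *
 *   qemu-system-x86_64 -M q35 -device virtio-iommu-pci,boot-bypass=on ...
 *
 * is typically enough; "boot-bypass" above assumes the proxy aliases this
 * device's properties.
 */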

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)