1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3 
4 /*
5  * If we have new enough libxenctrl then we do not want/need these compat
6  * interfaces, despite what the user supplied cflags might say. They
7  * must be undefined before including xenctrl.h
8  */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12 
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include "hw/xen/interface/io/xenbus.h"
16 
17 #include "hw/xen/xen.h"
18 #include "hw/pci/pci.h"
19 #include "hw/xen/trace.h"
20 
21 extern xc_interface *xen_xc;
22 
23 /*
24  * We don't support Xen prior to 4.2.0.
25  */
26 
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
29 
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
33 typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
34 
35 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
36 #define xenevtchn_close(h) xc_evtchn_close(h)
37 #define xenevtchn_fd(h) xc_evtchn_fd(h)
38 #define xenevtchn_pending(h) xc_evtchn_pending(h)
39 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
40 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
41 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
42 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
43 
44 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
45 #define xengnttab_close(h) xc_gnttab_close(h)
46 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
47 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
48 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
49 #define xengnttab_map_grant_refs(h, c, d, r, p) \
50     xc_gnttab_map_grant_refs(h, c, d, r, p)
51 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
52     xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
53 
54 #define xenforeignmemory_open(l, f) xen_xc
55 #define xenforeignmemory_close(h)
56 
/*
 * Compat xenforeignmemory_map() for Xen < 4.7.1.
 *
 * Map @pages guest frames listed in @arr from domain @dom with
 * protection @prot.  If @err is non-NULL, per-page errors are reported
 * through it (xc_map_foreign_bulk semantics); otherwise the whole
 * mapping fails if any single page cannot be mapped
 * (xc_map_foreign_pages semantics).  Returns the mapping address or
 * NULL on failure.
 */
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    /* QEMU coding style: always brace conditional bodies. */
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}
67 
/* Parenthesize (s): callers may pass an arbitrary expression as the size. */
#define xenforeignmemory_unmap(h, p, s) munmap(p, (s) * XC_PAGE_SIZE)
69 
70 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
71 
72 #include <xenevtchn.h>
73 #include <xengnttab.h>
74 #include <xenforeignmemory.h>
75 
76 #endif
77 
78 extern xenforeignmemory_handle *xen_fmem;
79 
80 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
81 
82 typedef xc_interface xendevicemodel_handle;
83 
84 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
85 
86 #undef XC_WANT_COMPAT_DEVICEMODEL_API
87 #include <xendevicemodel.h>
88 
89 #endif
90 
91 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
92 
/*
 * Fallback for Xen < 4.11: relocate @size guest frames from @src_gfn to
 * @dst_gfn one frame at a time with XENMAPSPACE_gmfn, since older libxc
 * has no batched relocate call.  @dmod is ignored; the global xen_xc
 * handle is used.  Returns 0, or the first non-zero libxc result.
 */
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t off;

    for (off = 0; off < size; off++) {
        int rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn,
                                          src_gfn + off, dst_gfn + off);

        if (rc != 0) {
            return rc;
        }
    }

    return 0;
}
113 
/*
 * Compat for Xen < 4.11: cache-attribute pinning lives in libxc rather
 * than libxendevicemodel.  @dmod is ignored; the global xen_xc handle
 * is used instead.
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}
120 
/* Opaque handle type: mapped-resource support does not exist before 4.11. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/*
 * Stub for Xen < 4.11: mapping ioreq server pages as a resource is not
 * supported.  Fail with EOPNOTSUPP so the caller can fall back to the
 * legacy HVM-parameter based mechanism.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}
136 
137 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
138 
139 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
140 
#define XEN_COMPAT_PHYSMAP
/*
 * Compat xenforeignmemory_map2() for Xen < 4.10.  The newer API adds a
 * placement address and mmap flags; the old API cannot honour them, so
 * only addr == NULL and flags == 0 are accepted.
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}
151 
/*
 * Stub for Xen < 4.10: xentoolcore is not available, so restricting all
 * control handles to a single domain cannot be done.  Fail with ENOTTY.
 */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}
157 
/*
 * Stub for Xen < 4.10: the devicemodel shutdown operation does not
 * exist in older libraries.  Fail with ENOTTY.
 */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
164 
165 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
166 
167 #include <xentoolcore.h>
168 
169 #endif
170 
171 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
172 
/*
 * Compat for Xen < 4.9: there is no separate devicemodel library, so
 * "opening" a devicemodel handle just returns the global libxenctrl
 * handle (xendevicemodel_handle is a typedef of xc_interface here).
 * @logger and @open_flags are ignored.
 */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
178 
179 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
180 
/*
 * Xen 4.5 .. 4.8 compatibility: forward the xendevicemodel_* ioreq
 * server operations to the corresponding xc_hvm_* libxenctrl calls.
 * xendevicemodel_handle is xc_interface in this configuration, so
 * @dmod can be passed straight through.
 */

/* Create an ioreq server for @domid; the new id is returned in @id. */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

/* Look up server @id's ioreq/bufioreq frames and bufioreq event channel. */
static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

/* Register an MMIO (is_mmio != 0) or port I/O range with server @id. */
static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

/* Remove a previously registered MMIO or port I/O range from server @id. */
static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

/* Route config space accesses for one PCI device to server @id. */
static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

/* Stop routing config space accesses for one PCI device to server @id. */
static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

/* Tear down ioreq server @id. */
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

/* Enable (@enabled != 0) or disable ioreq dispatch to server @id. */
static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
241 
242 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
243 
/*
 * Xen < 4.9 compatibility: forward the remaining xendevicemodel_*
 * operations to their xc_hvm_* libxenctrl equivalents.
 */

/* Assert (@level != 0) or deassert the INTx line of a PCI device. */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

/* Assert (@level != 0) or deassert ISA IRQ @irq. */
static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

/* Route PCI INTx link @link to IRQ @irq. */
static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the guest. */
static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

/*
 * Query dirty-VRAM tracking for @nr pages starting at @first_pfn; the
 * dirty bitmap is written to @dirty_bitmap.
 */
static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

/* Tell Xen that @nr guest pages starting at @first_pfn were modified. */
static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

/* Set the HVM memory type of @nr guest pages starting at @first_pfn. */
static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
293 
294 #endif
295 
296 extern xendevicemodel_handle *xen_dmod;
297 
/*
 * Convenience wrappers used by the rest of QEMU: the same devicemodel
 * operations as above, implicitly using the global xen_dmod handle.
 */

/* Set the HVM memory type of @nr guest pages starting at @first_pfn. */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

/* Assert (@level != 0) or deassert a PCI device's INTx line. */
static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

/* Route PCI INTx link @link to IRQ @irq. */
static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the guest. */
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

/* Assert (@level != 0) or deassert ISA IRQ @irq. */
static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

/* Query dirty-VRAM state for @nr pages from @first_pfn into @bitmap. */
static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

/* Tell Xen that @nr guest pages starting at @first_pfn were modified. */
static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
343 
/*
 * Restrict all Xen control handles to @domid and trace the outcome:
 * 0 on success, otherwise the errno left by the failed restriction.
 * Returns the xentoolcore_restrict_all() result.
 */
static inline int xen_restrict(domid_t domid)
{
    int rc = xentoolcore_restrict_all(domid);

    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}
351 
352 void destroy_hvm_domain(bool reboot);
353 
354 /* shutdown/destroy current domain because of an error */
355 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
356 
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Read the guest frame number holding the VMware-port register page via
 * the HVM_PARAM_VMPORT_REGS_PFN parameter.  Returns the
 * xc_hvm_param_get() result; *vmport_regs_pfn is only written on
 * success (rc >= 0).
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Headers without HVM_PARAM_VMPORT_REGS_PFN: vmport is unsupported. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
376 
377 /* Xen before 4.6 */
378 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
379 
380 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
381 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
382 #endif
383 
384 #endif
385 
/*
 * Query the legacy (default) ioreq server's shared page, buffered page
 * and buffered event channel from the domain's HVM parameters.
 * Returns 0 on success; on failure logs the missing parameter to
 * stderr and returns -1.
 */
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }
    *ioreq_pfn = param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }
    *bufioreq_pfn = param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                         &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    *bufioreq_evtchn = param;

    return 0;
}
422 
423 /* Xen before 4.5 */
424 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
425 
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

/*
 * Xen < 4.5 has no ioreq server API: only the implicit default ioreq
 * server exists.  Range/device registration is therefore a no-op, and
 * the info query falls back to the default server's HVM parameters.
 */

/* No-op: memory accesses already go to the default ioreq server. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

/* No-op: see xen_map_memory_section(). */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

/* No-op: port I/O already goes to the default ioreq server. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

/* No-op: see xen_map_io_section(). */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

/* No-op: PCI config accesses already go to the default ioreq server. */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

/* No-op: see xen_map_pcidev(). */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

/* No-op: the default ioreq server always exists; *ioservid is not set. */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

/* No-op: the default ioreq server cannot be destroyed. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Query the default ioreq server's pages and event channel. */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* Nothing to toggle on the default ioreq server; always succeeds. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
497 
498 /* Xen 4.5 */
499 #else
500 
/*
 * Set when explicit ioreq server creation fails and QEMU falls back to
 * the default (implicit) ioreq server; in that mode no explicit range
 * registration is performed.  NOTE: "static" in a header means every
 * including translation unit gets its own copy of this flag.
 */
static bool use_default_ioreq_server;

/* Register @section's guest-physical range as MMIO handled by @ioservid. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1; /* range is inclusive */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

/* Deregister the MMIO range previously registered for @section. */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1; /* range is inclusive */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

/* Register @section's range as port I/O handled by @ioservid. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1; /* range is inclusive */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

/* Deregister the port I/O range previously registered for @section. */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1; /* range is inclusive */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}
570 
/* Route @pci_dev's config space accesses to @ioservid (segment is 0). */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

/* Stop routing @pci_dev's config space accesses to @ioservid. */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

/*
 * Create an ioreq server for @dom.  On failure, fall back to the
 * default ioreq server: *ioservid is set to 0 and the fallback is
 * remembered in use_default_ioreq_server.
 */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

/* Tear down @ioservid (no-op when using the default ioreq server). */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

/*
 * Query @ioservid's ioreq/bufioreq pages and event channel, falling
 * back to the default server's HVM parameters when in fallback mode.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

/*
 * Enable or disable ioreq dispatch to @ioservid; a no-op success when
 * using the default ioreq server.
 */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
660 
661 #endif
662 
663 /* Xen before 4.8 */
664 
665 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
666 
/*
 * Xen < 4.8 compatibility: grant copy is not available.  Provide the
 * structure layout so callers still compile, plus a stub that always
 * fails.
 */
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;            /* local virtual address, or ... */
        struct {               /* ... a foreign grant reference */
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

/* Stub: grant copy requires Xen >= 4.8; always fails with -ENOSYS. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
688 #endif
689 
690 #endif /* QEMU_HW_XEN_COMMON_H */
691