1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3 
4 /*
5  * If we have new enough libxenctrl then we do not want/need these compat
6  * interfaces, despite what the user supplied cflags might say. They
7  * must be undefined before including xenctrl.h
8  */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12 
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
16 
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
22 
23 extern xc_interface *xen_xc;
24 
25 /*
26  * We don't support Xen prior to 4.2.0.
27  */
28 
29 /* Xen 4.2 through 4.6 */
30 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
31 
32 typedef xc_interface xenforeignmemory_handle;
33 typedef xc_evtchn xenevtchn_handle;
34 typedef xc_gnttab xengnttab_handle;
35 
36 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
37 #define xenevtchn_close(h) xc_evtchn_close(h)
38 #define xenevtchn_fd(h) xc_evtchn_fd(h)
39 #define xenevtchn_pending(h) xc_evtchn_pending(h)
40 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
41 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
42 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
43 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
44 
45 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
46 #define xengnttab_close(h) xc_gnttab_close(h)
47 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
48 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
49 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
50 #define xengnttab_map_grant_refs(h, c, d, r, p) \
51     xc_gnttab_map_grant_refs(h, c, d, r, p)
52 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
53     xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
54 
55 #define xenforeignmemory_open(l, f) xen_xc
56 #define xenforeignmemory_close(h)
57 
/*
 * Compat implementation of xenforeignmemory_map() for Xen < 4.7.
 * Maps @pages guest frames of domain @dom (listed in @arr) into our
 * address space with protection @prot.  When @err is non-NULL a
 * per-page error array is filled in via xc_map_foreign_bulk();
 * otherwise the all-or-nothing xc_map_foreign_pages() is used.
 * Returns the mapped address, or NULL on failure.
 */
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    /* QEMU coding style: braces are required even for single statements. */
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}
68 
/* Parenthesize the size argument: callers may pass an expression. */
#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)
70 
71 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
72 
73 #include <xenevtchn.h>
74 #include <xengnttab.h>
75 #include <xenforeignmemory.h>
76 
77 #endif
78 
79 extern xenforeignmemory_handle *xen_fmem;
80 
81 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
82 
83 typedef xc_interface xendevicemodel_handle;
84 
85 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
86 
87 #undef XC_WANT_COMPAT_DEVICEMODEL_API
88 #include <xendevicemodel.h>
89 
90 #endif
91 
92 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
93 
/*
 * Compat implementation for Xen < 4.11: relocate @size guest frames
 * from gfn @src_gfn to @dst_gfn.  There is no batched hypercall on
 * these versions, so each frame is moved individually with
 * XENMAPSPACE_gmfn; stops at the first failure.
 * Returns 0 on success, otherwise the first non-zero xc_* result.
 */
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t off = 0;
    int rc = 0;

    while (off < size && rc == 0) {
        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn,
                                      (unsigned long)(src_gfn + off),
                                      (xen_pfn_t)(dst_gfn + off));
        off++;
    }

    return rc;
}
114 
/*
 * Compat shim for Xen < 4.11: pin caching attributes through the plain
 * libxenctrl call.  @dmod is ignored; the global xen_xc handle is used.
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}
121 
/* Opaque stand-in: resource mapping does not exist before Xen 4.11. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/*
 * Stub for Xen < 4.11: mapping ioreq-server frames as a resource is not
 * supported; always fails with EOPNOTSUPP so callers can fall back to
 * the legacy ioreq page mechanism.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}
137 
138 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
139 
140 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
141 
#define XEN_COMPAT_PHYSMAP
/*
 * Compat shim for Xen < 4.10: xenforeignmemory_map2() adds @addr and
 * @flags over the original API; the old call supports neither, so only
 * the NULL/0 defaults are accepted (enforced by the assert).
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}
152 
/* Stub: restricting handles to one domain is unavailable before Xen 4.10. */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

/* Stub: the devicemodel shutdown op is unavailable before Xen 4.10. */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
165 
166 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
167 
168 #include <xentoolcore.h>
169 
170 #endif
171 
172 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
173 
/*
 * Compat shim for Xen < 4.9: there is no separate devicemodel library,
 * so "opening" it simply hands back the global libxenctrl handle.
 * @logger and @open_flags are ignored.
 */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
179 
180 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
181 
/*
 * Xen 4.5 .. 4.8 compat: forward the xendevicemodel_* ioreq-server
 * operations to the older xc_hvm_* libxenctrl calls.  In this
 * configuration the xendevicemodel_handle is really an xc_interface.
 */

/* Create an ioreq server for @domid; the new id is stored in *@id. */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

/* Retrieve the server's ioreq/bufioreq page PFNs and bufioreq evtchn. */
static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

/* Register an MMIO (is_mmio != 0) or port I/O range with the server. */
static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

/* Remove a previously registered MMIO or port I/O range. */
static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

/* Route config-space accesses for one PCI device to the server. */
static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

/* Stop routing config-space accesses for one PCI device. */
static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

/* Tear down the ioreq server. */
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

/* Enable or disable request delivery for the server. */
static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
242 
243 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
244 
/*
 * Xen < 4.9 compat: forward the remaining xendevicemodel_* operations
 * (interrupt injection, dirty tracking, memory typing) to the xc_hvm_*
 * libxenctrl equivalents.
 */

/* Assert or deassert a PCI INTx line of the given device. */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

/* Set the level of an ISA IRQ line. */
static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

/* Route a PCI interrupt link to an ISA IRQ. */
static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the guest. */
static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

/* Fetch and reset the dirty bitmap for @nr frames from @first_pfn. */
static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

/* Mark @nr frames from @first_pfn as modified (for migration logdirty). */
static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

/* Change the HVM memory type of @nr frames from @first_pfn. */
static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
294 
295 #endif
296 
297 extern xendevicemodel_handle *xen_dmod;
298 
/*
 * Convenience wrappers that supply the global xen_dmod devicemodel
 * handle, so callers only pass the domain and operation arguments.
 */

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

/*
 * Restrict all open Xen library handles to @domid and trace the
 * outcome (0 on success, otherwise errno).  Returns the underlying
 * xentoolcore_restrict_all() result.
 */
static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}
352 
353 void destroy_hvm_domain(bool reboot);
354 
355 /* shutdown/destroy current domain because of an error */
356 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
357 
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Read the guest PFN holding the VMware-port register page.
 * Returns xc_hvm_param_get()'s result; *vmport_regs_pfn is only
 * written on success (rc >= 0).
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Headers lack HVM_PARAM_VMPORT_REGS_PFN: feature unsupported here. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
377 
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

/* Older headers may not define this; supply the newer Xen value. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
386 
/*
 * Query the default (legacy) ioreq server's ioreq page PFN, buffered
 * ioreq page PFN and buffered-ioreq event channel from the domain's
 * HVM params.  Returns 0 on success; on failure prints a message to
 * stderr and returns -1.  Output pointers are written as each param
 * is read, so on failure earlier ones may already be set.
 */
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
423 
424 /* Xen before 4.5 */
425 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
426 
/* Older headers may not define this; supply the newer Xen value. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

/* ioservid_t only exists from Xen 4.5; provide a compatible typedef. */
typedef uint16_t ioservid_t;
434 
/*
 * Xen < 4.5 has no ioreq-server API: only the default ioreq machinery
 * exists, so the map/unmap/create/destroy operations are no-ops and
 * server info always comes from the legacy HVM params.
 */

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Always report the default server's pages/evtchn. */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* The default server cannot be toggled; pretend success. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
498 
499 /* Xen 4.5 */
500 #else
501 
/* Set when ioreq-server creation failed and the legacy default one is used. */
static bool use_default_ioreq_server;
503 
/*
 * Register @section's guest-physical range as MMIO with the ioreq
 * server.  No-op when running against the default (legacy) server.
 */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr first, last;

    if (use_default_ioreq_server) {
        return;
    }

    first = section->offset_within_address_space;
    last = first + int128_get64(section->size) - 1;

    trace_xen_map_mmio_range(ioservid, first, last);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                first, last);
}
520 
/* Remove @section's MMIO range from the ioreq server (no-op for default). */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

/* Register @section as a port I/O range (is_mmio = 0) with the server. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

/* Remove @section's port I/O range from the server (no-op for default). */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}
571 
/*
 * Route @pci_dev's config-space accesses to the ioreq server (segment
 * is hard-coded to 0).  No-op when using the default server.
 */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

/* Undo xen_map_pcidev() for @pci_dev.  No-op for the default server. */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}
603 
/*
 * Try to create a dedicated ioreq server for @dom.  On failure, fall
 * back to the default (legacy) server: *ioservid is zeroed and the
 * use_default_ioreq_server flag makes every later call a no-op or a
 * legacy-param lookup.
 */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

/* Destroy the dedicated ioreq server (no-op for the default one). */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

/*
 * Fetch the server's ioreq/bufioreq page PFNs and bufioreq event
 * channel, from either the dedicated server or the legacy HVM params.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

/* Enable/disable the dedicated server; the default one is always on. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
661 
662 #endif
663 
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
/*
 * Create a PV domain; wraps xc_domain_create(), whose signature gained
 * an extra (config) argument in Xen 4.7 — hence the two variants.
 */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Xen >= 4.7 variant: pass NULL for the new xc_domain_configuration_t. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
681 
682 /* Xen before 4.8 */
683 
684 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
685 
/*
 * Xen < 4.8 lacks xengnttab_grant_copy(): provide a compatible segment
 * descriptor (source/dest are either a local virtual address or a
 * foreign grant reference + offset + domid) and a stub that always
 * fails with -ENOSYS.
 */
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

/* Stub: grant copy is unsupported on this Xen version. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
707 #endif
708 
709 #endif /* QEMU_HW_XEN_COMMON_H */
710