1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3 
4 /*
5  * If we have new enough libxenctrl then we do not want/need these compat
6  * interfaces, despite what the user supplied cflags might say. They
7  * must be undefined before including xenctrl.h
8  */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12 
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include "hw/xen/interface/io/xenbus.h"
16 
17 #include "hw/xen/xen.h"
18 #include "hw/pci/pci.h"
19 #include "hw/xen/trace.h"
20 
21 extern xc_interface *xen_xc;
22 
23 /*
24  * We don't support Xen prior to 4.2.0.
25  */
26 
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
29 
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
33 typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
34 
35 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
36 #define xenevtchn_close(h) xc_evtchn_close(h)
37 #define xenevtchn_fd(h) xc_evtchn_fd(h)
38 #define xenevtchn_pending(h) xc_evtchn_pending(h)
39 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
40 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
41 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
42 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
43 
44 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
45 #define xengnttab_close(h) xc_gnttab_close(h)
46 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
47 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
48 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
49 #define xengnttab_map_grant_refs(h, c, d, r, p) \
50     xc_gnttab_map_grant_refs(h, c, d, r, p)
51 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
52     xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
53 
54 #define xenforeignmemory_open(l, f) xen_xc
55 #define xenforeignmemory_close(h)
56 
/*
 * Compat xenforeignmemory_map() for Xen < 4.7.1: map 'pages' guest
 * frames of domain 'dom' listed in arr[] with protection 'prot'.
 * When 'err' is non-NULL, per-page errors are reported there
 * (xc_map_foreign_bulk); otherwise the whole mapping fails if any
 * single page cannot be mapped (xc_map_foreign_pages).
 * Returns the mapped address or NULL on failure.
 */
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    /* Braces added: QEMU coding style requires braced control bodies. */
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}
67 
68 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
69 
70 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
71 
72 #include <xenevtchn.h>
73 #include <xengnttab.h>
74 #include <xenforeignmemory.h>
75 
76 #endif
77 
78 extern xenforeignmemory_handle *xen_fmem;
79 
80 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
81 
82 typedef xc_interface xendevicemodel_handle;
83 
84 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
85 
86 #undef XC_WANT_COMPAT_DEVICEMODEL_API
87 #include <xendevicemodel.h>
88 
89 #endif
90 
91 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
92 
/*
 * Fallback for xendevicemodel_relocate_memory() on Xen < 4.11: move
 * 'size' guest frames one at a time from src_gfn to dst_gfn via the
 * XENMAPSPACE_gmfn add-to-physmap operation on the global xen_xc
 * handle (dmod is unused).  Stops at the first failure and returns
 * that error code; returns 0 when every frame was relocated.
 */
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t off;

    for (off = 0; off < size; off++) {
        int err = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn,
                                           src_gfn + off, dst_gfn + off);

        if (err) {
            return err;
        }
    }

    return 0;
}
113 
/*
 * Fallback for xendevicemodel_pin_memory_cacheattr() on Xen < 4.11:
 * forward to the legacy libxenctrl entry point using the global
 * xen_xc handle (the 'dmod' argument is ignored).
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

/* Opaque placeholder: resource mapping is unsupported before Xen 4.11. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
127 
/*
 * Stub for Xen < 4.11: mapping ioreq-server resources is not
 * available, so always fail with EOPNOTSUPP; callers are expected to
 * fall back to the legacy HVM-param based setup.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

/* Matching stub: nothing was ever mapped, so unmapping trivially succeeds. */
static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}
142 
143 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
144 
145 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
146 
/* Xen < 4.10 still needs the QEMU-side physmap compatibility workaround. */
#define XEN_COMPAT_PHYSMAP
/*
 * Compat for xenforeignmemory_map2() (introduced in Xen 4.10): the
 * extra 'addr' and 'flags' arguments have no pre-4.10 equivalent, so
 * only the NULL/0 defaults are accepted (asserted below).
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

/* Stub: toolstack privilege restriction does not exist before Xen 4.10. */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}
163 
/* Stub: the dm-op shutdown call does not exist before Xen 4.10. */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
170 
171 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
172 
173 #include <xentoolcore.h>
174 
175 #endif
176 
177 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
178 
/*
 * Compat xendevicemodel_open() for Xen < 4.9: there is no separate
 * devicemodel library, so hand back the global xc handle; 'logger'
 * and 'open_flags' are ignored.
 */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
184 
185 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
186 
/*
 * Xen 4.5 - 4.8 compat: implement the xendevicemodel ioreq-server API
 * in terms of the old xc_hvm_* libxenctrl calls.  In all of these the
 * 'dmod' handle is the xc_interface itself (see xendevicemodel_open).
 */

/* Create an ioreq server for 'domid'; its id is returned via '*id'. */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

/* Fetch the ioreq/bufioreq page frames and bufioreq event channel. */
static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

/* Route [start, end] (MMIO if is_mmio, else port I/O) to ioreq server 'id'. */
static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

/* Remove a previously mapped I/O range from ioreq server 'id'. */
static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

/* Route config-space accesses for one PCI device to ioreq server 'id'. */
static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

/* Undo xendevicemodel_map_pcidev_to_ioreq_server() for one PCI device. */
static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

/* Destroy ioreq server 'id'. */
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

/* Enable (non-zero) or disable (zero) request delivery to ioreq server 'id'. */
static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
247 
248 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
249 
/*
 * Xen < 4.9 compat: forward the remaining xendevicemodel calls to
 * their xc_hvm_* libxenctrl equivalents ('dmod' is the xc handle).
 */

/* Assert/deassert a PCI INTx line of the given device. */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

/* Assert/deassert an ISA IRQ line. */
static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

/* Route PCI link 'link' to ISA IRQ 'irq'. */
static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the guest. */
static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

/* Fetch and reset the dirty bitmap of 'nr' frames starting at first_pfn. */
static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

/* Mark 'nr' frames starting at first_pfn as modified (for live migration). */
static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

/* Change the HVM memory type of 'nr' frames starting at first_pfn. */
static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
299 
300 #endif
301 
302 extern xendevicemodel_handle *xen_dmod;
303 
/*
 * Convenience wrappers: same operations as the xendevicemodel_* calls
 * above, but always on the global xen_dmod handle.
 */

/* Change the HVM memory type of 'nr' frames starting at first_pfn. */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

/* Assert/deassert a PCI INTx line of the given device. */
static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

/* Route PCI link 'link' to ISA IRQ 'irq'. */
static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

/* Inject an MSI with the given address/data pair into the guest. */
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

/* Assert/deassert an ISA IRQ line. */
static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

/* Fetch and reset the dirty bitmap of 'nr' frames starting at first_pfn. */
static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

/* Mark 'nr' frames starting at first_pfn as modified (for live migration). */
static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
349 
/*
 * Restrict QEMU's Xen privileges to domain 'domid'.  The outcome is
 * recorded in the trace log: 0 on success, the failing errno otherwise.
 * Returns the xentoolcore_restrict_all() result unchanged.
 */
static inline int xen_restrict(domid_t domid)
{
    int err = xentoolcore_restrict_all(domid);

    trace_xen_domid_restrict(err ? errno : 0);
    return err;
}
357 
358 void destroy_hvm_domain(bool reboot);
359 
360 /* shutdown/destroy current domain because of an error */
361 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
362 
363 #ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Read the VMware-port register page frame number for domain 'dom'
 * from the HVM_PARAM_VMPORT_REGS_PFN parameter.  On success (rc >= 0)
 * the frame number is stored in *vmport_regs_pfn; the xc_hvm_param_get()
 * return value is passed through either way.
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    uint64_t value;
    int ret = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);

    if (ret >= 0) {
        *vmport_regs_pfn = (xen_pfn_t)value;
    }
    return ret;
}
375 #else
/* Stub when the Xen headers lack HVM_PARAM_VMPORT_REGS_PFN: always fail. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
381 #endif
382 
383 /* Xen before 4.6 */
384 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
385 
/* Provide the buffered-ioreq "atomic" mode constant if the headers lack it. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif
389 
390 #endif
391 
/*
 * Query the legacy (default) ioreq server of domain 'dom' via the old
 * HVM params: the synchronous ioreq page, the buffered ioreq page and
 * the buffered-ioreq event channel.  Returns 0 on success; on any
 * failure prints a diagnostic to stderr and returns -1.
 */
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long value;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &value) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }
    *ioreq_pfn = value;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &value) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }
    *bufioreq_pfn = value;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                         &value) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    *bufioreq_evtchn = value;

    return 0;
}
428 
429 /* Xen before 4.5 */
430 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
431 
/* Xen < 4.5 headers may lack this HVM param; its value has been stable. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

/* ioreq type for PCI config-space accesses (absent from pre-4.5 headers). */
#define IOREQ_TYPE_PCI_CONFIG 2

/* ioreq server ids do not exist before 4.5; define a compatible type. */
typedef uint16_t ioservid_t;
439 
/*
 * Xen < 4.5 has no explicit ioreq servers: the default server receives
 * every access implicitly, so the map/unmap/create/destroy helpers are
 * all no-ops and only the default-server info query does real work.
 */

/* No-op: MMIO ranges need no registration before Xen 4.5. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

/* No-op counterpart of xen_map_memory_section(). */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

/* No-op: port I/O ranges need no registration before Xen 4.5. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

/* No-op counterpart of xen_map_io_section(). */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

/* No-op: PCI devices need no registration before Xen 4.5. */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

/* No-op counterpart of xen_map_pcidev(). */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

/* No-op: the default ioreq server always exists; *ioservid is left as-is. */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

/* No-op: the default ioreq server cannot be destroyed. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Always query the default ioreq server ('ioservid' is ignored). */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* No-op: the default ioreq server is always enabled; report success. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
503 
504 /* Xen 4.5 */
505 #else
506 
/*
 * Set when creating a dedicated ioreq server failed and QEMU fell back
 * to the domain's default server; the helpers below then become no-ops.
 * NOTE(review): 'static' in a header gives each including TU its own
 * copy — presumably only one ioreq-handling TU uses these; confirm.
 */
static bool use_default_ioreq_server;
508 
/*
 * Register a memory section's guest-physical range with the ioreq
 * server as an MMIO range.  No-op with the default ioreq server, which
 * receives all accesses implicitly.
 */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr first = section->offset_within_address_space;
    hwaddr last = first + int128_get64(section->size) - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, first, last);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                first, last);
}
525 
/*
 * Remove a memory section's guest-physical range from the ioreq
 * server's MMIO ranges.  No-op with the default ioreq server.
 */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr first = section->offset_within_address_space;
    hwaddr last = first + int128_get64(section->size) - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, first, last);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, first, last);
}
542 
/*
 * Register a section's range with the ioreq server as a port-I/O range
 * (is_mmio == 0).  No-op with the default ioreq server.
 */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;        /* inclusive upper bound */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

/*
 * Remove a section's range from the ioreq server's port-I/O ranges.
 * No-op with the default ioreq server.
 */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;        /* inclusive upper bound */

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}
576 
/*
 * Route config-space accesses for 'pci_dev' (segment 0) to the ioreq
 * server.  No-op with the default ioreq server.
 */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

/*
 * Undo xen_map_pcidev() for 'pci_dev'.  No-op with the default ioreq
 * server.
 */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}
608 
/*
 * Try to create a dedicated ioreq server for 'dom'.  On failure fall
 * back to the domain's default ioreq server: *ioservid becomes 0 and
 * use_default_ioreq_server is set so the other helpers turn into
 * no-ops.  The chosen path is recorded in the trace log.
 */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    if (xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                           HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                           ioservid) != 0) {
        *ioservid = 0;
        use_default_ioreq_server = true;
        trace_xen_default_ioreq_server();
        return;
    }

    trace_xen_ioreq_server_create(*ioservid);
}
625 
/* Destroy the dedicated ioreq server; no-op for the default server. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

/*
 * Fetch the ioreq/bufioreq page frames and bufioreq event channel,
 * querying the default server via HVM params when in fallback mode.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

/*
 * Enable/disable request delivery to the dedicated ioreq server.
 * The default server is always active, so that case reports success.
 */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
666 
667 #endif
668 
669 /* Xen before 4.8 */
670 
671 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
672 
/*
 * Xen < 4.8 compat: declare the grant-copy segment layout so callers
 * compile, and stub out the operation itself.  Each segment copies
 * 'len' bytes between a local virtual address and a foreign grant
 * reference (either side may be foreign).
 */
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;                 /* local source/destination */
        struct {
            uint32_t ref;           /* grant reference in 'domid' */
            uint16_t offset;        /* byte offset within the granted page */
            uint16_t domid;         /* owning domain of the grant */
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;                 /* GNTCOPY_* direction flags */
    int16_t status;                 /* per-segment result, set by Xen */
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

/* Stub: grant copy is unavailable before Xen 4.8; always fail. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
694 #endif
695 
696 #endif /* QEMU_HW_XEN_COMMON_H */
697