xref: /qemu/include/hw/xen/xen_native.h (revision 6e0dc9d2)
#ifndef QEMU_HW_XEN_NATIVE_H
#define QEMU_HW_XEN_NATIVE_H

#ifdef __XEN_INTERFACE_VERSION__
#error In Xen native files, include xen_native.h before other Xen headers
#endif

/*
 * If we have a new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>

#include "hw/xen/xen.h"
#include "hw/pci/pci_device.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.7.1.
 */

#include <xenforeignmemory.h>

extern xenforeignmemory_handle *xen_fmem;

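/*
 * Xen 4.9 introduced a dedicated libxendevicemodel; on older versions
 * the plain xc_interface handle doubles as the device model handle,
 * and the xendevicemodel_*() operations are emulated further down on
 * top of the legacy libxenctrl xc_hvm_*() calls.
 */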
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

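/*
 * Compatibility shims for interfaces that only exist from Xen 4.11
 * onwards.  Guest physmap relocation is emulated with repeated
 * XENMAPSPACE_gmfn add-to-physmap calls, cache-attribute pinning maps
 * straight onto libxenctrl, and the directly mappable ioreq server
 * resources are stubbed out to fail with EOPNOTSUPP.
 */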
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */

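/*
 * Compatibility for Xen older than 4.10: xenforeignmemory_map2(),
 * xentoolcore_restrict_all() and xendevicemodel_shutdown() are not
 * available.  map2() falls back to the older map() (which cannot honour
 * a placement address or mmap flags), the other two fail with ENOTTY,
 * and XEN_COMPAT_PHYSMAP tells callers to use the compatibility
 * physmap handling.
 */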
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif

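/*
 * Before Xen 4.9 there is no libxendevicemodel, so provide the
 * xendevicemodel_*() entry points used by QEMU as wrappers around the
 * equivalent xc_hvm_*() and libxenctrl functions, operating directly
 * on the xc_interface handle.
 */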
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif

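/*
 * Convenience wrappers that supply the global device model handle
 * (xen_dmod) to the corresponding xendevicemodel_*() calls.
 */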
extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

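/*
 * Restrict all open Xen library handles to the given domain and trace
 * the outcome (0 on success, errno on failure).
 */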
static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);

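/*
 * HVM_PARAM_VMPORT_REGS_PFN is only defined by Xen versions that
 * support VMware port (vmport) emulation; without it, the lookup
 * reports -ENOSYS.
 */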
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

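/*
 * Legacy path for the default ioreq server: read the ioreq and
 * buffered-ioreq page frame numbers and the buffered event channel
 * from the domain's HVM parameters rather than querying a specific
 * ioreq server.
 */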
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

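/*
 * Set when QEMU has to fall back to the domain's default ioreq server
 * (see xen_create_ioreq_server() below).  In that case the map/unmap
 * helpers that follow become no-ops, since the default server is sent
 * all I/O requests without explicit range or PCI device registration.
 */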
static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

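/*
 * Create a dedicated ioreq server with atomic buffered ioreq handling;
 * if that fails, remember to use the default ioreq server instead and
 * propagate the error to the caller.
 *
 * Illustrative usage from a hypothetical caller (not part of this
 * header; xen_domid is QEMU's global Xen domain id):
 *
 *     ioservid_t ioservid;
 *
 *     if (xen_create_ioreq_server(xen_domid, &ioservid)) {
 *         ... the default ioreq server will be used ...
 *     }
 *     xen_set_ioreq_server_state(xen_domid, ioservid, true);
 */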
static inline int xen_create_ioreq_server(domid_t dom,
                                          ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return rc;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();

    return rc;
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

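/*
 * The real xendevicemodel_set_irq_level() (level-triggered interrupts,
 * e.g. Arm SPIs) is only used with Xen 4.16 or newer; older versions
 * get a no-op stub.
 */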
#if CONFIG_XEN_CTRL_INTERFACE_VERSION <= 41500
static inline int xendevicemodel_set_irq_level(xendevicemodel_handle *dmod,
                                               domid_t domid, uint32_t irq,
                                               unsigned int level)
{
    return 0;
}
#endif

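/*
 * Guest virtio-mmio region and interrupt numbers; with Xen 4.17 and
 * older these GUEST_VIRTIO_MMIO_* constants are not provided by the
 * available headers, so define them here.
 */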
#if CONFIG_XEN_CTRL_INTERFACE_VERSION <= 41700
#define GUEST_VIRTIO_MMIO_BASE   xen_mk_ullong(0x02000000)
#define GUEST_VIRTIO_MMIO_SIZE   xen_mk_ullong(0x00100000)
#define GUEST_VIRTIO_MMIO_SPI_FIRST   33
#define GUEST_VIRTIO_MMIO_SPI_LAST    43
#endif

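/*
 * Guest RAM bank layout for x86 guests, analogous to the GUEST_RAM*
 * constants provided by the Arm public headers: a 3GB bank of low RAM
 * at 1GB and a second bank starting at 8GB.
 */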
#if defined(__i386__) || defined(__x86_64__)
#define GUEST_RAM_BANKS   2
#define GUEST_RAM0_BASE   0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE   0xc0000000ULL
#define GUEST_RAM1_BASE   0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE   0xfe00000000ULL
#endif

#endif /* QEMU_HW_XEN_NATIVE_H */