xref: /qemu/include/hw/xen/xen_native.h (revision 370ed600)
1 #ifndef QEMU_HW_XEN_NATIVE_H
2 #define QEMU_HW_XEN_NATIVE_H
3 
4 #ifdef __XEN_INTERFACE_VERSION__
5 #error In Xen native files, include xen_native.h before other Xen headers
6 #endif
7 
8 /*
9  * If we have new enough libxenctrl then we do not want/need these compat
10  * interfaces, despite what the user supplied cflags might say. They
11  * must be undefined before including xenctrl.h
12  */
13 #undef XC_WANT_COMPAT_EVTCHN_API
14 #undef XC_WANT_COMPAT_GNTTAB_API
15 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
16 
17 #include <xenctrl.h>
18 #include <xenstore.h>
19 
20 #include "hw/xen/xen.h"
21 #include "hw/pci/pci_device.h"
22 #include "hw/xen/trace.h"
23 
24 extern xc_interface *xen_xc;
25 
26 /*
27  * We don't support Xen prior to 4.2.0.
28  */
29 
30 /* Xen 4.2 through 4.6 */
31 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
32 
33 typedef xc_interface xenforeignmemory_handle;
34 
35 #define xenforeignmemory_open(l, f) xen_xc
36 #define xenforeignmemory_close(h)
37 
38 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
39 
40 #include <xenforeignmemory.h>
41 
42 #endif
43 
44 extern xenforeignmemory_handle *xen_fmem;
45 
46 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
47 
48 typedef xc_interface xendevicemodel_handle;
49 
50 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
51 
52 #undef XC_WANT_COMPAT_DEVICEMODEL_API
53 #include <xendevicemodel.h>
54 
55 #endif
56 
57 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
58 
59 static inline int xendevicemodel_relocate_memory(
60     xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
61     uint64_t dst_gfn)
62 {
63     uint32_t i;
64     int rc;
65 
66     for (i = 0; i < size; i++) {
67         unsigned long idx = src_gfn + i;
68         xen_pfn_t gpfn = dst_gfn + i;
69 
70         rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
71                                       gpfn);
72         if (rc) {
73             return rc;
74         }
75     }
76 
77     return 0;
78 }
79 
/*
 * Xen < 4.11 compat: forward to libxenctrl.  The dmod handle is ignored;
 * the global xen_xc handle is used instead.
 */
static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}
86 
87 typedef void xenforeignmemory_resource_handle;
88 
89 #define XENMEM_resource_ioreq_server 0
90 
91 #define XENMEM_resource_ioreq_server_frame_bufioreq 0
92 #define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
93 
/*
 * Xen < 4.11 has no resource-mapping API: always fail with EOPNOTSUPP so
 * callers fall back to the legacy (PFN-based) ioreq-server mapping path.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}
102 
/*
 * Xen < 4.11 compat: xenforeignmemory_map_resource() never succeeds, so
 * there is nothing to unmap; report success.
 */
static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}
108 
109 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
110 
111 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
112 
113 #define XEN_COMPAT_PHYSMAP
/*
 * Xen < 4.10 compat: xenforeignmemory_map() cannot honour a caller-chosen
 * virtual address or mmap flags, so only addr == NULL and flags == 0 are
 * supported here.
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}
123 
/*
 * Xen < 4.10 compat: privilege restriction is not available; fail with
 * ENOTTY so the caller can report it (see xen_restrict()).
 */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}
129 
/*
 * Xen < 4.10 compat: no shutdown hypercall via the devicemodel API; fail
 * with ENOTTY.
 */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}
136 
137 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
138 
139 #include <xentoolcore.h>
140 
141 #endif
142 
143 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
144 
/*
 * Xen < 4.9 compat: there is no separate devicemodel library, so reuse the
 * global libxenctrl handle.  logger and open_flags are ignored.
 */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
150 
151 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
152 
/* Xen 4.5-4.8 compat: the ioreq-server calls lived in libxenctrl as xc_hvm_*. */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}
160 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}
169 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}
177 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}
185 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}
193 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}
201 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}
207 
/* Xen 4.5-4.8 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
213 
214 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
215 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}
223 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}
230 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}
236 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}
243 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}
251 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}
258 
/* Xen < 4.9 compat: forward to the libxenctrl xc_hvm_* equivalent. */
static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}
265 
266 #endif
267 
268 extern xendevicemodel_handle *xen_dmod;
269 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}
276 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}
284 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}
290 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}
296 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}
303 
/* Convenience wrapper using the global xen_dmod devicemodel handle. */
static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
309 
/*
 * Restrict this process's Xen privileges to the given domain.  Traces 0 on
 * success or errno on failure, and returns the xentoolcore result (non-zero
 * on failure; on Xen < 4.10 this always fails with ENOTTY).
 */
static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}
317 
318 void destroy_hvm_domain(bool reboot);
319 
320 /* shutdown/destroy current domain because of an error */
321 void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
322 
323 #ifdef HVM_PARAM_VMPORT_REGS_PFN
324 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
325                                           xen_pfn_t *vmport_regs_pfn)
326 {
327     int rc;
328     uint64_t value;
329     rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
330     if (rc >= 0) {
331         *vmport_regs_pfn = (xen_pfn_t) value;
332     }
333     return rc;
334 }
335 #else
/* HVM_PARAM_VMPORT_REGS_PFN not available in these headers: unsupported. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
341 #endif
342 
343 /* Xen before 4.6 */
344 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
345 
346 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
347 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
348 #endif
349 
350 #endif
351 
352 static inline int xen_get_default_ioreq_server_info(domid_t dom,
353                                                     xen_pfn_t *ioreq_pfn,
354                                                     xen_pfn_t *bufioreq_pfn,
355                                                     evtchn_port_t
356                                                         *bufioreq_evtchn)
357 {
358     unsigned long param;
359     int rc;
360 
361     rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
362     if (rc < 0) {
363         fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
364         return -1;
365     }
366 
367     *ioreq_pfn = param;
368 
369     rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
370     if (rc < 0) {
371         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
372         return -1;
373     }
374 
375     *bufioreq_pfn = param;
376 
377     rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
378                           &param);
379     if (rc < 0) {
380         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
381         return -1;
382     }
383 
384     *bufioreq_evtchn = param;
385 
386     return 0;
387 }
388 
389 /* Xen before 4.5 */
390 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
391 
392 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
393 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
394 #endif
395 
396 #define IOREQ_TYPE_PCI_CONFIG 2
397 
398 typedef uint16_t ioservid_t;
399 
/* Xen < 4.5 has no ioreq servers: nothing to map. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}
405 
/* Xen < 4.5 has no ioreq servers: nothing to unmap. */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}
411 
/* Xen < 4.5 has no ioreq servers: nothing to map. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}
417 
/* Xen < 4.5 has no ioreq servers: nothing to unmap. */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}
423 
/* Xen < 4.5 has no ioreq servers: nothing to map. */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}
429 
/* Xen < 4.5 has no ioreq servers: nothing to unmap. */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
435 
/* Xen < 4.5: only the built-in default ioreq server exists; nothing to create. */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}
440 
/* Xen < 4.5: the default ioreq server cannot be destroyed; no-op. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}
445 
/*
 * Xen < 4.5: there is only the default ioreq server, so always report its
 * parameters; ioservid is ignored.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}
456 
/* Xen < 4.5: the default ioreq server is always enabled; report success. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
463 
464 /* Xen 4.5 */
465 #else
466 
467 static bool use_default_ioreq_server;
468 
469 static inline void xen_map_memory_section(domid_t dom,
470                                           ioservid_t ioservid,
471                                           MemoryRegionSection *section)
472 {
473     hwaddr start_addr = section->offset_within_address_space;
474     ram_addr_t size = int128_get64(section->size);
475     hwaddr end_addr = start_addr + size - 1;
476 
477     if (use_default_ioreq_server) {
478         return;
479     }
480 
481     trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
482     xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
483                                                 start_addr, end_addr);
484 }
485 
486 static inline void xen_unmap_memory_section(domid_t dom,
487                                             ioservid_t ioservid,
488                                             MemoryRegionSection *section)
489 {
490     hwaddr start_addr = section->offset_within_address_space;
491     ram_addr_t size = int128_get64(section->size);
492     hwaddr end_addr = start_addr + size - 1;
493 
494     if (use_default_ioreq_server) {
495         return;
496     }
497 
498     trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
499     xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
500                                                     1, start_addr, end_addr);
501 }
502 
503 static inline void xen_map_io_section(domid_t dom,
504                                       ioservid_t ioservid,
505                                       MemoryRegionSection *section)
506 {
507     hwaddr start_addr = section->offset_within_address_space;
508     ram_addr_t size = int128_get64(section->size);
509     hwaddr end_addr = start_addr + size - 1;
510 
511     if (use_default_ioreq_server) {
512         return;
513     }
514 
515     trace_xen_map_portio_range(ioservid, start_addr, end_addr);
516     xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
517                                                 start_addr, end_addr);
518 }
519 
520 static inline void xen_unmap_io_section(domid_t dom,
521                                         ioservid_t ioservid,
522                                         MemoryRegionSection *section)
523 {
524     hwaddr start_addr = section->offset_within_address_space;
525     ram_addr_t size = int128_get64(section->size);
526     hwaddr end_addr = start_addr + size - 1;
527 
528     if (use_default_ioreq_server) {
529         return;
530     }
531 
532     trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
533     xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
534                                                     0, start_addr, end_addr);
535 }
536 
537 static inline void xen_map_pcidev(domid_t dom,
538                                   ioservid_t ioservid,
539                                   PCIDevice *pci_dev)
540 {
541     if (use_default_ioreq_server) {
542         return;
543     }
544 
545     trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
546                          PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
547     xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
548                                               pci_dev_bus_num(pci_dev),
549                                               PCI_SLOT(pci_dev->devfn),
550                                               PCI_FUNC(pci_dev->devfn));
551 }
552 
553 static inline void xen_unmap_pcidev(domid_t dom,
554                                     ioservid_t ioservid,
555                                     PCIDevice *pci_dev)
556 {
557     if (use_default_ioreq_server) {
558         return;
559     }
560 
561     trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
562                            PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
563     xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
564                                                   pci_dev_bus_num(pci_dev),
565                                                   PCI_SLOT(pci_dev->devfn),
566                                                   PCI_FUNC(pci_dev->devfn));
567 }
568 
569 static inline void xen_create_ioreq_server(domid_t dom,
570                                            ioservid_t *ioservid)
571 {
572     int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
573                                                 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
574                                                 ioservid);
575 
576     if (rc == 0) {
577         trace_xen_ioreq_server_create(*ioservid);
578         return;
579     }
580 
581     *ioservid = 0;
582     use_default_ioreq_server = true;
583     trace_xen_default_ioreq_server();
584 }
585 
/*
 * Tear down a dedicated ioreq server.  No-op when the legacy default
 * ioreq server is in use (it cannot be destroyed).
 */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}
596 
/*
 * Fetch the ioreq/bufioreq page PFNs and buffered event channel, from the
 * legacy HVM parameters when the default server is in use, otherwise from
 * the dedicated ioreq server.  Returns 0 on success, negative on error.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}
613 
/*
 * Enable or disable the dedicated ioreq server.  The legacy default server
 * is always active, so this reports success without doing anything in that
 * case.
 */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
626 
627 #endif
628 
629 #endif /* QEMU_HW_XEN_NATIVE_H */
630