/*
 * QEMU HAXM support
 *
 * Copyright (c) 2011 Intel Corporation
 *  Written by:
 *  Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* HAX module interface - darwin version */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "sysemu/cpus.h"
#include "hax-accel-ops.h"

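/* Open the HAXM kernel module device node and return its fd (-1 on failure). */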
hax_fd hax_mod_open(void)
{
    int fd = open("/dev/HAX", O_RDWR);
    if (fd == -1) {
        fprintf(stderr, "Failed to open the hax module\n");
        return fd;
    }

    qemu_set_cloexec(fd);

    return fd;
}

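/*
 * Register a host RAM block (userspace VA plus size) with the HAXM kernel
 * module.  Modules that support 64-bit RAM blocks take the full range via
 * HAX_VM_IOCTL_ADD_RAMBLOCK; older ones fall back to the legacy 32-bit
 * HAX_VM_IOCTL_ALLOC_RAM call.
 */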
int hax_populate_ram(uint64_t va, uint64_t size)
{
    int ret;

    if (!hax_global.vm || !hax_global.vm->fd) {
        fprintf(stderr, "Allocate memory before vm create?\n");
        return -EINVAL;
    }

    if (hax_global.supports_64bit_ramblock) {
        struct hax_ramblock_info ramblock = {
            .start_va = va,
            .size = size,
            .reserved = 0
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
    } else {
        struct hax_alloc_ram_info info = {
            .size = (uint32_t)size,
            .pad = 0,
            .va = va
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
    }
    if (ret < 0) {
        fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
                ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
                hax_global.supports_64bit_ramblock ? "new" : "legacy");
        return ret;
    }
    return 0;
}

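/*
 * Map (or update the mapping of) a guest physical range onto the given host
 * virtual address, passing the flags through to the kernel module.
 */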
int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
{
    struct hax_set_ram_info info;
    int ret;

    info.pa_start = start_pa;
    info.size = size;
    info.va = host_va;
    info.flags = (uint8_t) flags;

    ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}

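/* Query the capability info exposed by the HAXM module. */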
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX capability\n");
        return -errno;
    }

    return 0;
}

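/* Query the version of the loaded HAXM module. */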
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX version\n");
        return -errno;
    }

    return 0;
}

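/* Build the devfs paths for the per-VM and per-vCPU device nodes. */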
static char *hax_vm_devfs_string(int vm_id)
{
    return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);
}

static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
}

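/* Ask the HAXM module to create a VM; the new VM id is returned in *vmid. */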
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;

    if (hax_invalid_fd(hax->fd)) {
        return -EINVAL;
    }

    if (hax->vm) {
        return 0;
    }

    ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
    *vmid = vm_id;
    return ret;
}

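/* Open the device node of an already created VM. */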
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    hax_fd fd;
    char *vm_name = NULL;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        return -1;
    }

    fd = open(vm_name, O_RDWR);
    g_free(vm_name);
    if (fd >= 0) {
        qemu_set_cloexec(fd);
    }

    return fd;
}

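/* Report the QEMU-side API version to the HAXM module for this VM. */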
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;

    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }

    ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);

    if (ret < 0) {
        fprintf(stderr, "Failed to notify qemu API version\n");
        return ret;
    }
    return 0;
}

/* Simply assume the size should be at least that of the hax_tunnel,
 * since hax_tunnel can be extended later with compatibility in mind.
 */
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;

    ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
    if (ret < 0) {
        fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
    }

    return ret;
}

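/* Open the device node of a vCPU previously created in the given VM. */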
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd fd;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path) {
        fprintf(stderr, "Failed to get the devfs\n");
        return -EINVAL;
    }

    fd = open(devfs_path, O_RDWR);
    g_free(devfs_path);
    if (fd < 0) {
        fprintf(stderr, "Failed to open the vcpu devfs\n");
        return fd;
    }
    qemu_set_cloexec(fd);
    return fd;
}

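/*
 * Set up the communication channel for a vCPU: the kernel module returns the
 * addresses of the shared hax_tunnel structure and the I/O buffer, which are
 * stashed in the vcpu state for use at run time.
 */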
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    int ret;
    struct hax_tunnel_info info;

    ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
    if (ret) {
        fprintf(stderr, "Failed to setup the hax tunnel\n");
        return ret;
    }

    if (!valid_hax_tunnel_size(info.size)) {
        fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
        ret = -EINVAL;
        return ret;
    }

    vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
    vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
    return 0;
}

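/* Run the vCPU in guest mode until the next exit back to QEMU. */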
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
}

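/*
 * The three helpers below copy FPU, MSR and general register state between
 * QEMU and the kernel module; 'set' selects the direction (non-zero writes
 * QEMU's copy into the vCPU, zero reads it back).
 */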
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
    }
    return ret;
}

int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }
    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
    }
    return ret;
}

int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
    } else {
        ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
    }
    return ret;
}

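/* Inject an interrupt with the given vector into the vCPU. */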
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
}

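/* Kick the vCPU thread out of guest mode so it notices the pending exit request. */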
void hax_kick_vcpu_thread(CPUState *cpu)
{
    /*
     * FIXME: race condition with the exit_request check in
     * hax_vcpu_hax_exec
     */
    cpu->exit_request = 1;
    cpus_kick_thread(cpu);
}