/*
 * QEMU HAXM support
 *
 * Copyright (c) 2011 Intel Corporation
 *  Written by:
 *  Jiang Yunhong<yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "hax-accel-ops.h"

/*
 * Return 0 on success, -1 if the HAX driver is not loaded,
 * and another negative value for any other failure.
 */
static int hax_open_device(hax_fd *fd)
{
    uint32_t errNum = 0;
    HANDLE hDevice;

    if (!fd) {
        return -2;
    }

    hDevice = CreateFile("\\\\.\\HAX",
                         GENERIC_READ | GENERIC_WRITE,
                         0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

    if (hDevice == INVALID_HANDLE_VALUE) {
        fprintf(stderr, "Failed to open the HAX device!\n");
        errNum = GetLastError();
        if (errNum == ERROR_FILE_NOT_FOUND) {
            return -1;
        }
        return -2;
    }
    *fd = hDevice;
    return 0;
}

/* Open the HAX control device and return its handle (NULL if it could not be opened). */
hax_fd hax_mod_open(void)
{
    int ret;
    hax_fd fd = NULL;

    ret = hax_open_device(&fd);
    if (ret != 0) {
        fprintf(stderr, "Open HAX device failed\n");
    }

    return fd;
}

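/*
 * Register a RAM block (host VA range) with the HAXM driver so it can be
 * used as guest memory.  Newer drivers take a 64-bit description via
 * HAX_VM_IOCTL_ADD_RAMBLOCK; older ones only support the legacy 32-bit
 * HAX_VM_IOCTL_ALLOC_RAM interface.
 */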
int hax_populate_ram(uint64_t va, uint64_t size)
{
    int ret;
    HANDLE hDeviceVM;
    DWORD dSize = 0;

    if (!hax_global.vm || !hax_global.vm->fd) {
        fprintf(stderr, "Allocate memory before vm create?\n");
        return -EINVAL;
    }

    hDeviceVM = hax_global.vm->fd;
    if (hax_global.supports_64bit_ramblock) {
        struct hax_ramblock_info ramblock = {
            .start_va = va,
            .size = size,
            .reserved = 0
        };

        ret = DeviceIoControl(hDeviceVM,
                              HAX_VM_IOCTL_ADD_RAMBLOCK,
                              &ramblock, sizeof(ramblock), NULL, 0, &dSize,
                              (LPOVERLAPPED) NULL);
    } else {
        struct hax_alloc_ram_info info = {
            .size = (uint32_t) size,
            .pad = 0,
            .va = va
        };

        ret = DeviceIoControl(hDeviceVM,
                              HAX_VM_IOCTL_ALLOC_RAM,
                              &info, sizeof(info), NULL, 0, &dSize,
                              (LPOVERLAPPED) NULL);
    }

    if (!ret) {
        fprintf(stderr, "Failed to register RAM block: va=0x%" PRIx64
                ", size=0x%" PRIx64 ", method=%s\n", va, size,
                hax_global.supports_64bit_ramblock ? "new" : "legacy");
        return -EFAULT;
    }

    return 0;
}

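/*
 * Map (or modify the mapping of) a guest physical address range onto the
 * given host virtual address via HAX_VM_IOCTL_SET_RAM.  'flags' carries
 * the HAX mapping flags.
 */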
int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
{
    struct hax_set_ram_info info;
    HANDLE hDeviceVM = hax_global.vm->fd;
    DWORD dSize = 0;
    int ret;

    info.pa_start = start_pa;
    info.size = size;
    info.va = host_va;
    info.flags = (uint8_t) flags;

    ret = DeviceIoControl(hDeviceVM, HAX_VM_IOCTL_SET_RAM,
                          &info, sizeof(info), NULL, 0, &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

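/* Query the capability info advertised by the HAXM driver. */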
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;
    HANDLE hDevice = hax->fd;        /* handle to hax module */
    DWORD dSize = 0;
    DWORD err = 0;

    if (hax_invalid_fd(hDevice)) {
        fprintf(stderr, "Invalid fd for hax device!\n");
        return -ENODEV;
    }

    ret = DeviceIoControl(hDevice, HAX_IOCTL_CAPABILITY, NULL, 0, cap,
                          sizeof(*cap), &dSize, (LPOVERLAPPED) NULL);

    if (!ret) {
        err = GetLastError();
        if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
            fprintf(stderr, "HAX capability info does not fit in the buffer.\n");
        }
        fprintf(stderr, "Failed to get HAX capability: %lu\n", err);
        return -EFAULT;
    } else {
        return 0;
    }
}

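/* Query the version information of the loaded HAXM module. */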
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;
    HANDLE hDevice = hax->fd; /* handle to hax module */
    DWORD dSize = 0;
    DWORD err = 0;

    if (hax_invalid_fd(hDevice)) {
        fprintf(stderr, "Invalid fd for hax device!\n");
        return -ENODEV;
    }

    ret = DeviceIoControl(hDevice,
                          HAX_IOCTL_VERSION,
                          NULL, 0,
                          version, sizeof(*version), &dSize,
                          (LPOVERLAPPED) NULL);

    if (!ret) {
        err = GetLastError();
        if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
            fprintf(stderr, "HAX module version does not fit in the buffer.\n");
        }
        fprintf(stderr, "Failed to get HAX module version: %lu\n", err);
        return -EFAULT;
    } else {
        return 0;
    }
}

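/*
 * Build the Windows device paths for a VM and for a vcpu within a VM,
 * e.g. "\\.\hax_vm01" and "\\.\hax_vm01_vcpu00".  Callers must g_free()
 * the returned string.
 */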
static char *hax_vm_devfs_string(int vm_id)
{
    return g_strdup_printf("\\\\.\\hax_vm%02d", vm_id);
}

static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    return g_strdup_printf("\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id);
}

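/*
 * Ask the HAXM driver to create a new VM and return its id via *vmid.
 * If a VM has already been created, this is a no-op that reports success.
 */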
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;
    DWORD dSize = 0;

    if (hax_invalid_fd(hax->fd)) {
        return -EINVAL;
    }

    if (hax->vm) {
        return 0;
    }

    ret = DeviceIoControl(hax->fd,
                          HAX_IOCTL_CREATE_VM,
                          NULL, 0, &vm_id, sizeof(vm_id), &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret) {
        fprintf(stderr, "Failed to create VM. Error code: %lu\n",
                GetLastError());
        return -1;
    }
    *vmid = vm_id;
    return 0;
}

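/* Open the per-VM device node and return its handle. */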
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    char *vm_name = NULL;
    hax_fd hDeviceVM;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        fprintf(stderr, "Failed to open VM. VM name is null\n");
        return INVALID_HANDLE_VALUE;
    }

    hDeviceVM = CreateFile(vm_name,
                           GENERIC_READ | GENERIC_WRITE,
                           0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (hDeviceVM == INVALID_HANDLE_VALUE) {
        fprintf(stderr, "Failed to open the VM device %s, error code %lu\n",
                vm_name, GetLastError());
    }

    g_free(vm_name);
    return hDeviceVM;
}

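/* Report the QEMU-side HAX API version to the driver for this VM. */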
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;
    DWORD dSize = 0;
    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }
    ret = DeviceIoControl(vm_fd,
                          HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
                          qversion, sizeof(struct hax_qemu_version),
                          NULL, 0, &dSize, (LPOVERLAPPED) NULL);
    if (!ret) {
        fprintf(stderr, "Failed to notify qemu API version\n");
        return -1;
    }
    return 0;
}

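/* Ask the HAXM driver to create vcpu 'vcpuid' within the VM. */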
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;
    DWORD dSize = 0;

    ret = DeviceIoControl(vm_fd,
                          HAX_VM_IOCTL_VCPU_CREATE,
                          &vcpuid, sizeof(vcpuid), NULL, 0, &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret) {
        fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
        return -1;
    }

    return 0;
}

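/* Open the per-vcpu device node and return its handle. */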
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd hDeviceVCPU;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path) {
        fprintf(stderr, "Failed to get the devfs\n");
        return INVALID_HANDLE_VALUE;
    }

    hDeviceVCPU = CreateFile(devfs_path,
                             GENERIC_READ | GENERIC_WRITE,
                             0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
                             NULL);

    if (hDeviceVCPU == INVALID_HANDLE_VALUE) {
        fprintf(stderr, "Failed to open the vcpu devfs\n");
    }
    g_free(devfs_path);
    return hDeviceVCPU;
}

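/*
 * Retrieve the addresses of the communication tunnel and the I/O buffer
 * shared with the HAXM driver for this vcpu, and record them in the vcpu
 * state.  The tunnel carries the VM-exit information for each run.
 */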
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    hax_fd hDeviceVCPU = vcpu->fd;
    int ret;
    struct hax_tunnel_info info;
    DWORD dSize = 0;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_SETUP_TUNNEL,
                          NULL, 0, &info, sizeof(info), &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret) {
        fprintf(stderr, "Failed to setup the hax tunnel\n");
        return -1;
    }

    if (!valid_hax_tunnel_size(info.size)) {
        fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
        ret = -EINVAL;
        return ret;
    }
    vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
    vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
    return 0;
}

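/*
 * Enter guest execution on this vcpu.  The ioctl does not return until the
 * guest triggers a VM exit that needs QEMU's attention (or the vcpu thread
 * is kicked, see hax_kick_vcpu_thread() below).
 */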
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    int ret;
    HANDLE hDeviceVCPU = vcpu->fd;
    DWORD dSize = 0;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_RUN,
                          NULL, 0, NULL, 0, &dSize, (LPOVERLAPPED) NULL);
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

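/*
 * Copy the FPU state (fx_layout format) between QEMU and the HAXM driver:
 * set != 0 writes 'fl' to the vcpu, set == 0 reads it back.
 */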
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }

    hDeviceVCPU = fd;

    if (set) {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_SET_FPU,
                              fl, sizeof(*fl), NULL, 0, &dSize,
                              (LPOVERLAPPED) NULL);
    } else {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_GET_FPU,
                              NULL, 0, fl, sizeof(*fl), &dSize,
                              (LPOVERLAPPED) NULL);
    }
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

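/*
 * Copy a batch of MSRs between QEMU and the HAXM driver:
 * set != 0 writes 'msrs' to the vcpu, set == 0 reads them back.
 */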
int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }
    hDeviceVCPU = fd;

    if (set) {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_SET_MSRS,
                              msrs, sizeof(*msrs),
                              msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
    } else {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_IOCTL_GET_MSRS,
                              msrs, sizeof(*msrs),
                              msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
    }
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

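/*
 * Copy the general vcpu register state between QEMU and the HAXM driver:
 * set != 0 writes 'state' to the vcpu, set == 0 reads it back.
 */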
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }

    hDeviceVCPU = fd;

    if (set) {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_SET_REGS,
                              state, sizeof(*state),
                              NULL, 0, &dSize, (LPOVERLAPPED) NULL);
    } else {
        ret = DeviceIoControl(hDeviceVCPU,
                              HAX_VCPU_GET_REGS,
                              NULL, 0,
                              state, sizeof(*state), &dSize,
                              (LPOVERLAPPED) NULL);
    }
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

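/* Inject the given interrupt vector into the vcpu. */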
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    DWORD dSize;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }

    hDeviceVCPU = fd;

    ret = DeviceIoControl(hDeviceVCPU,
                          HAX_VCPU_IOCTL_INTERRUPT,
                          &vector, sizeof(vector), NULL, 0, &dSize,
                          (LPOVERLAPPED) NULL);
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}

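/*
 * No-op APC routine.  Queueing it on the vcpu thread makes a vcpu blocked
 * in the HAX_VCPU_IOCTL_RUN ioctl return to user space, so the thread can
 * notice the exit_request set by hax_kick_vcpu_thread().
 */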
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}

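/* Kick a vcpu out of guest execution so it re-checks cpu->exit_request. */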
void hax_kick_vcpu_thread(CPUState *cpu)
{
    /*
     * FIXME: race condition with the exit_request check in
     * hax_vcpu_hax_exec
     */
    cpu->exit_request = 1;
    if (!qemu_cpu_is_self(cpu)) {
        if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
}