xref: /qemu/target/arm/kvm.c (revision fcf5ef2a)
1*fcf5ef2aSThomas Huth /*
2*fcf5ef2aSThomas Huth  * ARM implementation of KVM hooks
3*fcf5ef2aSThomas Huth  *
4*fcf5ef2aSThomas Huth  * Copyright Christoffer Dall 2009-2010
5*fcf5ef2aSThomas Huth  *
6*fcf5ef2aSThomas Huth  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7*fcf5ef2aSThomas Huth  * See the COPYING file in the top-level directory.
8*fcf5ef2aSThomas Huth  *
9*fcf5ef2aSThomas Huth  */
10*fcf5ef2aSThomas Huth 
11*fcf5ef2aSThomas Huth #include "qemu/osdep.h"
12*fcf5ef2aSThomas Huth #include <sys/ioctl.h>
13*fcf5ef2aSThomas Huth 
14*fcf5ef2aSThomas Huth #include <linux/kvm.h>
15*fcf5ef2aSThomas Huth 
16*fcf5ef2aSThomas Huth #include "qemu-common.h"
17*fcf5ef2aSThomas Huth #include "qemu/timer.h"
18*fcf5ef2aSThomas Huth #include "qemu/error-report.h"
19*fcf5ef2aSThomas Huth #include "sysemu/sysemu.h"
20*fcf5ef2aSThomas Huth #include "sysemu/kvm.h"
21*fcf5ef2aSThomas Huth #include "kvm_arm.h"
22*fcf5ef2aSThomas Huth #include "cpu.h"
23*fcf5ef2aSThomas Huth #include "internals.h"
24*fcf5ef2aSThomas Huth #include "hw/arm/arm.h"
25*fcf5ef2aSThomas Huth #include "exec/memattrs.h"
26*fcf5ef2aSThomas Huth #include "exec/address-spaces.h"
27*fcf5ef2aSThomas Huth #include "hw/boards.h"
28*fcf5ef2aSThomas Huth #include "qemu/log.h"
29*fcf5ef2aSThomas Huth 
30*fcf5ef2aSThomas Huth const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
31*fcf5ef2aSThomas Huth     KVM_CAP_LAST_INFO
32*fcf5ef2aSThomas Huth };
33*fcf5ef2aSThomas Huth 
34*fcf5ef2aSThomas Huth static bool cap_has_mp_state;
35*fcf5ef2aSThomas Huth 
36*fcf5ef2aSThomas Huth int kvm_arm_vcpu_init(CPUState *cs)
37*fcf5ef2aSThomas Huth {
38*fcf5ef2aSThomas Huth     ARMCPU *cpu = ARM_CPU(cs);
39*fcf5ef2aSThomas Huth     struct kvm_vcpu_init init;
40*fcf5ef2aSThomas Huth 
41*fcf5ef2aSThomas Huth     init.target = cpu->kvm_target;
42*fcf5ef2aSThomas Huth     memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));
43*fcf5ef2aSThomas Huth 
44*fcf5ef2aSThomas Huth     return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
45*fcf5ef2aSThomas Huth }
46*fcf5ef2aSThomas Huth 
/* Set up a scratch VM with a single VCPU so host capabilities can be
 * probed before the real VM exists.  On success fdarray[0..2] hold the
 * /dev/kvm, VM and VCPU fds (release them with
 * kvm_arm_destroy_scratch_host_vcpu()) and *init, if non-NULL, has been
 * used to initialize the VCPU.  Returns false on failure, with every
 * fd opened so far closed again.
 */
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    /* Ask the kernel which VCPU target it prefers for this host */
    ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            init->target = *cpus_to_try++;
            memset(init->features, 0, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    /* Close in reverse creation order; fds never opened are still -1 */
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
122*fcf5ef2aSThomas Huth 
/* Close the three fds created by kvm_arm_create_scratch_host_vcpu(),
 * in reverse order of creation: VCPU fd, VM fd, /dev/kvm fd.
 */
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    close(fdarray[2]); /* VCPU fd */
    close(fdarray[1]); /* VM fd */
    close(fdarray[0]); /* /dev/kvm fd */
}
131*fcf5ef2aSThomas Huth 
132*fcf5ef2aSThomas Huth static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
133*fcf5ef2aSThomas Huth {
134*fcf5ef2aSThomas Huth     ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);
135*fcf5ef2aSThomas Huth 
136*fcf5ef2aSThomas Huth     /* All we really need to set up for the 'host' CPU
137*fcf5ef2aSThomas Huth      * is the feature bits -- we rely on the fact that the
138*fcf5ef2aSThomas Huth      * various ID register values in ARMCPU are only used for
139*fcf5ef2aSThomas Huth      * TCG CPUs.
140*fcf5ef2aSThomas Huth      */
141*fcf5ef2aSThomas Huth     if (!kvm_arm_get_host_cpu_features(ahcc)) {
142*fcf5ef2aSThomas Huth         fprintf(stderr, "Failed to retrieve host CPU features!\n");
143*fcf5ef2aSThomas Huth         abort();
144*fcf5ef2aSThomas Huth     }
145*fcf5ef2aSThomas Huth }
146*fcf5ef2aSThomas Huth 
147*fcf5ef2aSThomas Huth static void kvm_arm_host_cpu_initfn(Object *obj)
148*fcf5ef2aSThomas Huth {
149*fcf5ef2aSThomas Huth     ARMHostCPUClass *ahcc = ARM_HOST_CPU_GET_CLASS(obj);
150*fcf5ef2aSThomas Huth     ARMCPU *cpu = ARM_CPU(obj);
151*fcf5ef2aSThomas Huth     CPUARMState *env = &cpu->env;
152*fcf5ef2aSThomas Huth 
153*fcf5ef2aSThomas Huth     cpu->kvm_target = ahcc->target;
154*fcf5ef2aSThomas Huth     cpu->dtb_compatible = ahcc->dtb_compatible;
155*fcf5ef2aSThomas Huth     env->features = ahcc->features;
156*fcf5ef2aSThomas Huth }
157*fcf5ef2aSThomas Huth 
/* QOM type for the 'host' CPU model; the parent class depends on
 * whether this is a 32-bit or 64-bit target build.
 */
static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = kvm_arm_host_cpu_initfn,
    .class_init = kvm_arm_host_cpu_class_init,
    .class_size = sizeof(ARMHostCPUClass),
};
169*fcf5ef2aSThomas Huth 
170*fcf5ef2aSThomas Huth int kvm_arch_init(MachineState *ms, KVMState *s)
171*fcf5ef2aSThomas Huth {
172*fcf5ef2aSThomas Huth     /* For ARM interrupt delivery is always asynchronous,
173*fcf5ef2aSThomas Huth      * whether we are using an in-kernel VGIC or not.
174*fcf5ef2aSThomas Huth      */
175*fcf5ef2aSThomas Huth     kvm_async_interrupts_allowed = true;
176*fcf5ef2aSThomas Huth 
177*fcf5ef2aSThomas Huth     cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
178*fcf5ef2aSThomas Huth 
179*fcf5ef2aSThomas Huth     type_register_static(&host_arm_cpu_type_info);
180*fcf5ef2aSThomas Huth 
181*fcf5ef2aSThomas Huth     return 0;
182*fcf5ef2aSThomas Huth }
183*fcf5ef2aSThomas Huth 
/* The KVM vcpu id is simply QEMU's cpu_index for the CPU */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
188*fcf5ef2aSThomas Huth 
189*fcf5ef2aSThomas Huth /* We track all the KVM devices which need their memory addresses
190*fcf5ef2aSThomas Huth  * passing to the kernel in a list of these structures.
191*fcf5ef2aSThomas Huth  * When board init is complete we run through the list and
192*fcf5ef2aSThomas Huth  * tell the kernel the base addresses of the memory regions.
193*fcf5ef2aSThomas Huth  * We use a MemoryListener to track mapping and unmapping of
194*fcf5ef2aSThomas Huth  * the regions during board creation, so the board models don't
195*fcf5ef2aSThomas Huth  * need to do anything special for the KVM case.
196*fcf5ef2aSThomas Huth  */
typedef struct KVMDevice {
    /* Payload for the legacy KVM_ARM_SET_DEVICE_ADDR vm ioctl;
     * kda.addr is -1 while the region is unmapped.
     */
    struct kvm_arm_device_addr kda;
    /* Attribute used with KVM_SET_DEVICE_ATTR when dev_fd is valid */
    struct kvm_device_attr kdattr;
    /* The memory region whose mapping address we track */
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    /* Device control API fd; a value < 0 selects the legacy vm ioctl */
    int dev_fd;
} KVMDevice;
204*fcf5ef2aSThomas Huth 
/* All KVM devices whose addresses still need passing to the kernel */
static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;
206*fcf5ef2aSThomas Huth 
207*fcf5ef2aSThomas Huth static void kvm_arm_devlistener_add(MemoryListener *listener,
208*fcf5ef2aSThomas Huth                                     MemoryRegionSection *section)
209*fcf5ef2aSThomas Huth {
210*fcf5ef2aSThomas Huth     KVMDevice *kd;
211*fcf5ef2aSThomas Huth 
212*fcf5ef2aSThomas Huth     QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
213*fcf5ef2aSThomas Huth         if (section->mr == kd->mr) {
214*fcf5ef2aSThomas Huth             kd->kda.addr = section->offset_within_address_space;
215*fcf5ef2aSThomas Huth         }
216*fcf5ef2aSThomas Huth     }
217*fcf5ef2aSThomas Huth }
218*fcf5ef2aSThomas Huth 
219*fcf5ef2aSThomas Huth static void kvm_arm_devlistener_del(MemoryListener *listener,
220*fcf5ef2aSThomas Huth                                     MemoryRegionSection *section)
221*fcf5ef2aSThomas Huth {
222*fcf5ef2aSThomas Huth     KVMDevice *kd;
223*fcf5ef2aSThomas Huth 
224*fcf5ef2aSThomas Huth     QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
225*fcf5ef2aSThomas Huth         if (section->mr == kd->mr) {
226*fcf5ef2aSThomas Huth             kd->kda.addr = -1;
227*fcf5ef2aSThomas Huth         }
228*fcf5ef2aSThomas Huth     }
229*fcf5ef2aSThomas Huth }
230*fcf5ef2aSThomas Huth 
/* Listener that tracks mapping/unmapping of the regions registered
 * in kvm_devices_head during board creation.
 */
static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};
235*fcf5ef2aSThomas Huth 
236*fcf5ef2aSThomas Huth static void kvm_arm_set_device_addr(KVMDevice *kd)
237*fcf5ef2aSThomas Huth {
238*fcf5ef2aSThomas Huth     struct kvm_device_attr *attr = &kd->kdattr;
239*fcf5ef2aSThomas Huth     int ret;
240*fcf5ef2aSThomas Huth 
241*fcf5ef2aSThomas Huth     /* If the device control API is available and we have a device fd on the
242*fcf5ef2aSThomas Huth      * KVMDevice struct, let's use the newer API
243*fcf5ef2aSThomas Huth      */
244*fcf5ef2aSThomas Huth     if (kd->dev_fd >= 0) {
245*fcf5ef2aSThomas Huth         uint64_t addr = kd->kda.addr;
246*fcf5ef2aSThomas Huth         attr->addr = (uintptr_t)&addr;
247*fcf5ef2aSThomas Huth         ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
248*fcf5ef2aSThomas Huth     } else {
249*fcf5ef2aSThomas Huth         ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
250*fcf5ef2aSThomas Huth     }
251*fcf5ef2aSThomas Huth 
252*fcf5ef2aSThomas Huth     if (ret < 0) {
253*fcf5ef2aSThomas Huth         fprintf(stderr, "Failed to set device address: %s\n",
254*fcf5ef2aSThomas Huth                 strerror(-ret));
255*fcf5ef2aSThomas Huth         abort();
256*fcf5ef2aSThomas Huth     }
257*fcf5ef2aSThomas Huth }
258*fcf5ef2aSThomas Huth 
259*fcf5ef2aSThomas Huth static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
260*fcf5ef2aSThomas Huth {
261*fcf5ef2aSThomas Huth     KVMDevice *kd, *tkd;
262*fcf5ef2aSThomas Huth 
263*fcf5ef2aSThomas Huth     memory_listener_unregister(&devlistener);
264*fcf5ef2aSThomas Huth     QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
265*fcf5ef2aSThomas Huth         if (kd->kda.addr != -1) {
266*fcf5ef2aSThomas Huth             kvm_arm_set_device_addr(kd);
267*fcf5ef2aSThomas Huth         }
268*fcf5ef2aSThomas Huth         memory_region_unref(kd->mr);
269*fcf5ef2aSThomas Huth         g_free(kd);
270*fcf5ef2aSThomas Huth     }
271*fcf5ef2aSThomas Huth }
272*fcf5ef2aSThomas Huth 
/* Fires once machine init completes; see kvm_arm_machine_init_done() */
static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};
276*fcf5ef2aSThomas Huth 
277*fcf5ef2aSThomas Huth void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
278*fcf5ef2aSThomas Huth                              uint64_t attr, int dev_fd)
279*fcf5ef2aSThomas Huth {
280*fcf5ef2aSThomas Huth     KVMDevice *kd;
281*fcf5ef2aSThomas Huth 
282*fcf5ef2aSThomas Huth     if (!kvm_irqchip_in_kernel()) {
283*fcf5ef2aSThomas Huth         return;
284*fcf5ef2aSThomas Huth     }
285*fcf5ef2aSThomas Huth 
286*fcf5ef2aSThomas Huth     if (QSLIST_EMPTY(&kvm_devices_head)) {
287*fcf5ef2aSThomas Huth         memory_listener_register(&devlistener, &address_space_memory);
288*fcf5ef2aSThomas Huth         qemu_add_machine_init_done_notifier(&notify);
289*fcf5ef2aSThomas Huth     }
290*fcf5ef2aSThomas Huth     kd = g_new0(KVMDevice, 1);
291*fcf5ef2aSThomas Huth     kd->mr = mr;
292*fcf5ef2aSThomas Huth     kd->kda.id = devid;
293*fcf5ef2aSThomas Huth     kd->kda.addr = -1;
294*fcf5ef2aSThomas Huth     kd->kdattr.flags = 0;
295*fcf5ef2aSThomas Huth     kd->kdattr.group = group;
296*fcf5ef2aSThomas Huth     kd->kdattr.attr = attr;
297*fcf5ef2aSThomas Huth     kd->dev_fd = dev_fd;
298*fcf5ef2aSThomas Huth     QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
299*fcf5ef2aSThomas Huth     memory_region_ref(kd->mr);
300*fcf5ef2aSThomas Huth }
301*fcf5ef2aSThomas Huth 
/* qsort() comparator ordering uint64_t values ascending */
static int compare_u64(const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *)a;
    uint64_t y = *(const uint64_t *)b;

    /* yields 1, -1 or 0, matching the three-way comparison contract */
    return (x > y) - (x < y);
}
312*fcf5ef2aSThomas Huth 
/* Initialize the CPUState's cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 *
 * Returns 0 on success, otherwise a negative errno from the
 * KVM_GET_REG_LIST ioctls, or -EINVAL on inconsistent kernel data.
 */
int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    /* Probe with n == 0: the kernel replies -E2BIG and fills in the
     * register count, which sizes the real request below.
     */
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    /* First pass: count (and size-validate) the registers which are
     * synced via the cpreg list.
     */
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    /* Resize the (possibly TCG-populated) arrays to match the KVM list */
    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    /* Second pass: record the index of each register we will sync */
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}
389*fcf5ef2aSThomas Huth 
/* Read the current value of every register in the cpreg list from the
 * kernel into cpu->cpreg_values (one KVM_GET_ONE_REG per register).
 * Returns true only if every read succeeded.
 */
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            /* 32-bit registers are read into a temporary and then
             * widened into the uint64_t values array.
             */
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            /* kvm_arm_init_cpreg_list() only admits U32/U64 registers */
            abort();
        }
        if (ret) {
            /* Remember the failure but keep reading the other registers */
            ok = false;
        }
    }
    return ok;
}
425*fcf5ef2aSThomas Huth 
/* Push the values in cpu->cpreg_values to the kernel for every
 * register whose sync level is within @level (one KVM_SET_ONE_REG per
 * register).  Returns true only if every write succeeded.
 */
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        /* Skip registers which only need syncing at a deeper level */
        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            /* Narrow the stored uint64_t value for a 32-bit register */
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            /* kvm_arm_init_cpreg_list() only admits U32/U64 registers */
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}
465*fcf5ef2aSThomas Huth 
466*fcf5ef2aSThomas Huth void kvm_arm_reset_vcpu(ARMCPU *cpu)
467*fcf5ef2aSThomas Huth {
468*fcf5ef2aSThomas Huth     int ret;
469*fcf5ef2aSThomas Huth 
470*fcf5ef2aSThomas Huth     /* Re-init VCPU so that all registers are set to
471*fcf5ef2aSThomas Huth      * their respective reset values.
472*fcf5ef2aSThomas Huth      */
473*fcf5ef2aSThomas Huth     ret = kvm_arm_vcpu_init(CPU(cpu));
474*fcf5ef2aSThomas Huth     if (ret < 0) {
475*fcf5ef2aSThomas Huth         fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
476*fcf5ef2aSThomas Huth         abort();
477*fcf5ef2aSThomas Huth     }
478*fcf5ef2aSThomas Huth     if (!write_kvmstate_to_list(cpu)) {
479*fcf5ef2aSThomas Huth         fprintf(stderr, "write_kvmstate_to_list failed\n");
480*fcf5ef2aSThomas Huth         abort();
481*fcf5ef2aSThomas Huth     }
482*fcf5ef2aSThomas Huth }
483*fcf5ef2aSThomas Huth 
484*fcf5ef2aSThomas Huth /*
485*fcf5ef2aSThomas Huth  * Update KVM's MP_STATE based on what QEMU thinks it is
486*fcf5ef2aSThomas Huth  */
487*fcf5ef2aSThomas Huth int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
488*fcf5ef2aSThomas Huth {
489*fcf5ef2aSThomas Huth     if (cap_has_mp_state) {
490*fcf5ef2aSThomas Huth         struct kvm_mp_state mp_state = {
491*fcf5ef2aSThomas Huth             .mp_state =
492*fcf5ef2aSThomas Huth             cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
493*fcf5ef2aSThomas Huth         };
494*fcf5ef2aSThomas Huth         int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
495*fcf5ef2aSThomas Huth         if (ret) {
496*fcf5ef2aSThomas Huth             fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
497*fcf5ef2aSThomas Huth                     __func__, ret, strerror(-ret));
498*fcf5ef2aSThomas Huth             return -1;
499*fcf5ef2aSThomas Huth         }
500*fcf5ef2aSThomas Huth     }
501*fcf5ef2aSThomas Huth 
502*fcf5ef2aSThomas Huth     return 0;
503*fcf5ef2aSThomas Huth }
504*fcf5ef2aSThomas Huth 
505*fcf5ef2aSThomas Huth /*
506*fcf5ef2aSThomas Huth  * Sync the KVM MP_STATE into QEMU
507*fcf5ef2aSThomas Huth  */
508*fcf5ef2aSThomas Huth int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
509*fcf5ef2aSThomas Huth {
510*fcf5ef2aSThomas Huth     if (cap_has_mp_state) {
511*fcf5ef2aSThomas Huth         struct kvm_mp_state mp_state;
512*fcf5ef2aSThomas Huth         int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
513*fcf5ef2aSThomas Huth         if (ret) {
514*fcf5ef2aSThomas Huth             fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
515*fcf5ef2aSThomas Huth                     __func__, ret, strerror(-ret));
516*fcf5ef2aSThomas Huth             abort();
517*fcf5ef2aSThomas Huth         }
518*fcf5ef2aSThomas Huth         cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED);
519*fcf5ef2aSThomas Huth     }
520*fcf5ef2aSThomas Huth 
521*fcf5ef2aSThomas Huth     return 0;
522*fcf5ef2aSThomas Huth }
523*fcf5ef2aSThomas Huth 
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    /* Nothing arch-specific to do before entering the guest */
}
527*fcf5ef2aSThomas Huth 
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    /* Report unspecified transaction attributes for all exits */
    return MEMTXATTRS_UNSPECIFIED;
}
532*fcf5ef2aSThomas Huth 
533*fcf5ef2aSThomas Huth 
534*fcf5ef2aSThomas Huth int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
535*fcf5ef2aSThomas Huth {
536*fcf5ef2aSThomas Huth     int ret = 0;
537*fcf5ef2aSThomas Huth 
538*fcf5ef2aSThomas Huth     switch (run->exit_reason) {
539*fcf5ef2aSThomas Huth     case KVM_EXIT_DEBUG:
540*fcf5ef2aSThomas Huth         if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
541*fcf5ef2aSThomas Huth             ret = EXCP_DEBUG;
542*fcf5ef2aSThomas Huth         } /* otherwise return to guest */
543*fcf5ef2aSThomas Huth         break;
544*fcf5ef2aSThomas Huth     default:
545*fcf5ef2aSThomas Huth         qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
546*fcf5ef2aSThomas Huth                       __func__, run->exit_reason);
547*fcf5ef2aSThomas Huth         break;
548*fcf5ef2aSThomas Huth     }
549*fcf5ef2aSThomas Huth     return ret;
550*fcf5ef2aSThomas Huth }
551*fcf5ef2aSThomas Huth 
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    /* Always stop the VCPU when KVM reports an emulation error */
    return true;
}
556*fcf5ef2aSThomas Huth 
int kvm_arch_process_async_events(CPUState *cs)
{
    /* Nothing arch-specific to process */
    return 0;
}
561*fcf5ef2aSThomas Huth 
int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    /* NOTE(review): nonzero appears to mean "not handled here" --
     * confirm against the generic SIGBUS handling in kvm-all.c.
     */
    return 1;
}
566*fcf5ef2aSThomas Huth 
int kvm_arch_on_sigbus(int code, void *addr)
{
    /* NOTE(review): nonzero appears to mean "not handled here" --
     * confirm against the generic SIGBUS handling in kvm-all.c.
     */
    return 1;
}
571*fcf5ef2aSThomas Huth 
/* The #ifdef protections are until 32bit headers are imported and can
 * be removed once both 32 and 64 bit reach feature parity.
 */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
#ifdef KVM_GUESTDBG_USE_SW_BP
    /* Enable software breakpoint handling if any breakpoints are set */
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
#endif
#ifdef KVM_GUESTDBG_USE_HW
    /* Enable hardware debug and copy the bp/wp state into dbg->arch */
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
#endif
}
589*fcf5ef2aSThomas Huth 
void kvm_arch_init_irq_routing(KVMState *s)
{
    /* Nothing arch-specific to set up */
}
593*fcf5ef2aSThomas Huth 
594*fcf5ef2aSThomas Huth int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
595*fcf5ef2aSThomas Huth {
596*fcf5ef2aSThomas Huth      if (machine_kernel_irqchip_split(ms)) {
597*fcf5ef2aSThomas Huth          perror("-machine kernel_irqchip=split is not supported on ARM.");
598*fcf5ef2aSThomas Huth          exit(1);
599*fcf5ef2aSThomas Huth     }
600*fcf5ef2aSThomas Huth 
601*fcf5ef2aSThomas Huth     /* If we can create the VGIC using the newer device control API, we
602*fcf5ef2aSThomas Huth      * let the device do this when it initializes itself, otherwise we
603*fcf5ef2aSThomas Huth      * fall back to the old API */
604*fcf5ef2aSThomas Huth     return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
605*fcf5ef2aSThomas Huth }
606*fcf5ef2aSThomas Huth 
607*fcf5ef2aSThomas Huth int kvm_arm_vgic_probe(void)
608*fcf5ef2aSThomas Huth {
609*fcf5ef2aSThomas Huth     if (kvm_create_device(kvm_state,
610*fcf5ef2aSThomas Huth                           KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
611*fcf5ef2aSThomas Huth         return 3;
612*fcf5ef2aSThomas Huth     } else if (kvm_create_device(kvm_state,
613*fcf5ef2aSThomas Huth                                  KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
614*fcf5ef2aSThomas Huth         return 2;
615*fcf5ef2aSThomas Huth     } else {
616*fcf5ef2aSThomas Huth         return 0;
617*fcf5ef2aSThomas Huth     }
618*fcf5ef2aSThomas Huth }
619*fcf5ef2aSThomas Huth 
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    /* No route adjustments needed; report success */
    return 0;
}
625*fcf5ef2aSThomas Huth 
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    /* Nothing arch-specific to do after adding an MSI route */
    return 0;
}
631*fcf5ef2aSThomas Huth 
int kvm_arch_release_virq_post(int virq)
{
    /* Nothing arch-specific to do when a virq is released */
    return 0;
}
636*fcf5ef2aSThomas Huth 
/* Map an MSI data value to a GSI number.  The data value is offset by
 * 32 (presumably the GIC SGI/PPI space -- confirm against the irqchip
 * model) and truncated to 16 bits.
 */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    uint32_t spi = data - 32;

    return spi & 0xffff;
}
641