// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

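/*
 * A Hyper-V "sparse" VP set is made of 64-bit banks, each covering 64 VP
 * indexes, so DIV_ROUND_UP(KVM_MAX_VCPUS, 64) banks are enough to describe
 * every vCPU KVM can create.
 */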
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * default '0x10000' value on boot and this should not #GP. We need to
	 * allow zero-initing the register from host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu,
				     HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
				     0);
	if (!entry)
		return false;

	return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
		kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
	 * is broken, disabled or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

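	/*
	 * The reference counter counts in 100ns units, using the same formula
	 * the guest applies to the TSC page (see the derivation before
	 * compute_tsc_page_parameters() below):
	 *	time_ref = ((tsc * tsc_scale) >> 64) + tsc_offset
	 */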
	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

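	/*
	 * Periodic timers are not re-armed here; kvm_hv_process_stimers()
	 * restarts them once the pending expiration has been processed.
	 */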
	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

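		/* exp_time and time_now are in 100ns units, hrtimer wants ns */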
		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config.enable = 0;
	else if (stimer->config.auto_enable)
		stimer->config.enable = 1;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type. However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
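	/*
	 * A zero return from synic_set_irq() means the interrupt was not
	 * injected into any local APIC, surface this as -EFAULT.
	 */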
	if (r == 0)
		return -EFAULT;
	return 0;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message
	 * delivery for them (within "lazy" lost ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);

	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page)
{
	if (!kvm_hv_assist_page_enabled(vcpu))
		return false;
	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				      assist_page, sizeof(*assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int i;

	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;

	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);

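	/*
	 * The VP index defaults to the vcpu index; userspace may remap it
	 * later via HV_X64_MSR_VP_INDEX (see kvm_hv_set_msr()).
	 */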
	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);

	return 0;
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;

	if (!to_hv_vcpu(vcpu)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}

	synic = to_hv_synic(vcpu);

	/*
	 * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv, so
	 * request to deactivate APICv permanently.
	 */
	kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
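/*
 * Worked example (illustrative): for a 1 GHz guest TSC, kvmclock would use
 * tsc_shift = 1 and tsc_to_system_mul = 2^31 (one ns per tick), giving
 *    scale = 2^31 * 2^(32+1) / 100 = 2^64 / 100
 * so that ticks * scale / 2^64 = ticks / 100, i.e. 100ns units as expected.
 */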
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}

/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation and Hyper-V is known to expect the values in TSC page to
 * stay constant before TSC access emulation is disabled from guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
 * frequency and guest visible TSC value across migration (and prevent it when
 * TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		return;

	mutex_lock(&hv->hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;

	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;

		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		goto out_unlock;
	}

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	goto out_unlock;

out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
	mutex_unlock(&hv->hv_lock);
}

void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u64 gfn;

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
	    tsc_page_update_unsafe(hv))
		return;

	mutex_lock(&hv->hv_lock);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;

out_unlock:
	mutex_unlock(&hv->hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting the bit 31 of EAX
		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just
		 * going to be clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}

		/* vmcall/vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
		i += 3;

		/* ret */
		((unsigned char *)instructions)[i++] = 0xc3;

		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);

		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);

			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;

		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_index by
		 * kvm_hv_vcpu_postcreate so they initially match. Now the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear apic_assist portion of struct hv_vp_assist_page
		 * only, there can be valuable data in the rest which needs
		 * to be preserved e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					    sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
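		/* CONFIG and COUNT MSRs of the four timers are interleaved */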
1413 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1414
1415 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1416 data, host);
1417 }
1418 case HV_X64_MSR_STIMER0_COUNT:
1419 case HV_X64_MSR_STIMER1_COUNT:
1420 case HV_X64_MSR_STIMER2_COUNT:
1421 case HV_X64_MSR_STIMER3_COUNT: {
1422 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1423
1424 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1425 data, host);
1426 }
1427 case HV_X64_MSR_TSC_FREQUENCY:
1428 case HV_X64_MSR_APIC_FREQUENCY:
1429 /* read-only, but still ignore it if host-initiated */
1430 if (!host)
1431 return 1;
1432 break;
1433 default:
1434 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1435 msr, data);
1436 return 1;
1437 }
1438
1439 return 0;
1440 }
1441
kvm_hv_get_msr_pw(struct kvm_vcpu * vcpu,u32 msr,u64 * pdata,bool host)1442 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1443 bool host)
1444 {
1445 u64 data = 0;
1446 struct kvm *kvm = vcpu->kvm;
1447 struct kvm_hv *hv = to_kvm_hv(kvm);
1448
1449 switch (msr) {
1450 case HV_X64_MSR_GUEST_OS_ID:
1451 data = hv->hv_guest_os_id;
1452 break;
1453 case HV_X64_MSR_HYPERCALL:
1454 data = hv->hv_hypercall;
1455 break;
1456 case HV_X64_MSR_TIME_REF_COUNT:
1457 data = get_time_ref_counter(kvm);
1458 break;
1459 case HV_X64_MSR_REFERENCE_TSC:
1460 data = hv->hv_tsc_page;
1461 break;
1462 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1463 return kvm_hv_msr_get_crash_data(kvm,
1464 msr - HV_X64_MSR_CRASH_P0,
1465 pdata);
1466 case HV_X64_MSR_CRASH_CTL:
1467 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1468 case HV_X64_MSR_RESET:
1469 data = 0;
1470 break;
1471 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1472 data = hv->hv_reenlightenment_control;
1473 break;
1474 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1475 data = hv->hv_tsc_emulation_control;
1476 break;
1477 case HV_X64_MSR_TSC_EMULATION_STATUS:
1478 data = hv->hv_tsc_emulation_status;
1479 break;
1480 case HV_X64_MSR_SYNDBG_OPTIONS:
1481 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1482 return syndbg_get_msr(vcpu, msr, pdata, host);
1483 default:
1484 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1485 return 1;
1486 }
1487
1488 *pdata = data;
1489 return 0;
1490 }
1491
kvm_hv_get_msr(struct kvm_vcpu * vcpu,u32 msr,u64 * pdata,bool host)1492 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1493 bool host)
1494 {
1495 u64 data = 0;
1496 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1497
1498 switch (msr) {
1499 case HV_X64_MSR_VP_INDEX:
1500 data = hv_vcpu->vp_index;
1501 break;
1502 case HV_X64_MSR_EOI:
1503 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1504 case HV_X64_MSR_ICR:
1505 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1506 case HV_X64_MSR_TPR:
1507 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1508 case HV_X64_MSR_VP_ASSIST_PAGE:
1509 data = hv_vcpu->hv_vapic;
1510 break;
1511 case HV_X64_MSR_VP_RUNTIME:
1512 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1513 break;
1514 case HV_X64_MSR_SCONTROL:
1515 case HV_X64_MSR_SVERSION:
1516 case HV_X64_MSR_SIEFP:
1517 case HV_X64_MSR_SIMP:
1518 case HV_X64_MSR_EOM:
1519 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1520 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1521 case HV_X64_MSR_STIMER0_CONFIG:
1522 case HV_X64_MSR_STIMER1_CONFIG:
1523 case HV_X64_MSR_STIMER2_CONFIG:
1524 case HV_X64_MSR_STIMER3_CONFIG: {
1525 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1526
1527 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1528 pdata);
1529 }
1530 case HV_X64_MSR_STIMER0_COUNT:
1531 case HV_X64_MSR_STIMER1_COUNT:
1532 case HV_X64_MSR_STIMER2_COUNT:
1533 case HV_X64_MSR_STIMER3_COUNT: {
1534 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1535
1536 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1537 pdata);
1538 }
1539 case HV_X64_MSR_TSC_FREQUENCY:
1540 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1541 break;
1542 case HV_X64_MSR_APIC_FREQUENCY:
1543 data = APIC_BUS_FREQUENCY;
1544 break;
1545 default:
1546 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1547 return 1;
1548 }
1549 *pdata = data;
1550 return 0;
1551 }
1552
kvm_hv_set_msr_common(struct kvm_vcpu * vcpu,u32 msr,u64 data,bool host)1553 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1554 {
1555 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1556
1557 if (!host && !vcpu->arch.hyperv_enabled)
1558 return 1;
1559
1560 if (!to_hv_vcpu(vcpu)) {
1561 if (kvm_hv_vcpu_init(vcpu))
1562 return 1;
1563 }
1564
1565 if (kvm_hv_msr_partition_wide(msr)) {
1566 int r;
1567
1568 mutex_lock(&hv->hv_lock);
1569 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1570 mutex_unlock(&hv->hv_lock);
1571 return r;
1572 } else
1573 return kvm_hv_set_msr(vcpu, msr, data, host);
1574 }
1575
kvm_hv_get_msr_common(struct kvm_vcpu * vcpu,u32 msr,u64 * pdata,bool host)1576 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1577 {
1578 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1579
1580 if (!host && !vcpu->arch.hyperv_enabled)
1581 return 1;
1582
1583 if (!to_hv_vcpu(vcpu)) {
1584 if (kvm_hv_vcpu_init(vcpu))
1585 return 1;
1586 }
1587
1588 if (kvm_hv_msr_partition_wide(msr)) {
1589 int r;
1590
1591 mutex_lock(&hv->hv_lock);
1592 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1593 mutex_unlock(&hv->hv_lock);
1594 return r;
1595 } else
1596 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1597 }
1598
sparse_set_to_vcpu_mask(struct kvm * kvm,u64 * sparse_banks,u64 valid_bank_mask,u64 * vp_bitmap,unsigned long * vcpu_bitmap)1599 static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1600 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1601 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1602 {
1603 struct kvm_hv *hv = to_kvm_hv(kvm);
1604 struct kvm_vcpu *vcpu;
1605 int i, bank, sbank = 0;
1606
1607 memset(vp_bitmap, 0,
1608 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1609 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1610 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1611 vp_bitmap[bank] = sparse_banks[sbank++];
1612
1613 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
1614 /* for all vcpus vp_index == vcpu_idx */
1615 return (unsigned long *)vp_bitmap;
1616 }
1617
1618 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1619 kvm_for_each_vcpu(i, vcpu, kvm) {
1620 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1621 __set_bit(i, vcpu_bitmap);
1622 }
1623 return vcpu_bitmap;
1624 }
1625
kvm_hv_flush_tlb(struct kvm_vcpu * vcpu,u64 ingpa,u16 rep_cnt,bool ex)1626 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
1627 {
1628 struct kvm *kvm = vcpu->kvm;
1629 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1630 struct hv_tlb_flush_ex flush_ex;
1631 struct hv_tlb_flush flush;
1632 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1633 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1634 unsigned long *vcpu_mask;
1635 u64 valid_bank_mask;
1636 u64 sparse_banks[64];
1637 int sparse_banks_len;
1638 bool all_cpus;
1639
1640 if (!ex) {
1641 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
1642 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1643
1644 trace_kvm_hv_flush_tlb(flush.processor_mask,
1645 flush.address_space, flush.flags);
1646
1647 valid_bank_mask = BIT_ULL(0);
1648 sparse_banks[0] = flush.processor_mask;
1649
1650 /*
1651 * Work around possible WS2012 bug: it sends hypercalls
1652 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1653 * while also expecting us to flush something and crashing if
1654 * we don't. Let's treat processor_mask == 0 same as
1655 * HV_FLUSH_ALL_PROCESSORS.
1656 */
1657 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1658 flush.processor_mask == 0;
1659 } else {
1660 if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
1661 sizeof(flush_ex))))
1662 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1663
1664 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1665 flush_ex.hv_vp_set.format,
1666 flush_ex.address_space,
1667 flush_ex.flags);
1668
1669 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1670 all_cpus = flush_ex.hv_vp_set.format !=
1671 HV_GENERIC_SET_SPARSE_4K;
1672
1673 sparse_banks_len =
1674 bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
1675 sizeof(sparse_banks[0]);
1676
1677 if (!sparse_banks_len && !all_cpus)
1678 goto ret_success;
1679
1680 if (!all_cpus &&
1681 kvm_read_guest(kvm,
1682 ingpa + offsetof(struct hv_tlb_flush_ex,
1683 hv_vp_set.bank_contents),
1684 sparse_banks,
1685 sparse_banks_len))
1686 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1687 }
1688
1689 cpumask_clear(&hv_vcpu->tlb_flush);
1690
1691 vcpu_mask = all_cpus ? NULL :
1692 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1693 vp_bitmap, vcpu_bitmap);
1694
1695 /*
1696 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
1697 * analyze it here, flush TLB regardless of the specified address space.
1698 */
1699 kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
1700 NULL, vcpu_mask, &hv_vcpu->tlb_flush);
1701
1702 ret_success:
1703 /* We always do full TLB flush, set rep_done = rep_cnt. */
1704 return (u64)HV_STATUS_SUCCESS |
1705 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1706 }
1707
kvm_send_ipi_to_many(struct kvm * kvm,u32 vector,unsigned long * vcpu_bitmap)1708 static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1709 unsigned long *vcpu_bitmap)
1710 {
1711 struct kvm_lapic_irq irq = {
1712 .delivery_mode = APIC_DM_FIXED,
1713 .vector = vector
1714 };
1715 struct kvm_vcpu *vcpu;
1716 int i;
1717
1718 kvm_for_each_vcpu(i, vcpu, kvm) {
1719 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1720 continue;
1721
1722 /* We fail only when APIC is disabled */
1723 kvm_apic_set_irq(vcpu, &irq, NULL);
1724 }
1725 }
1726
kvm_hv_send_ipi(struct kvm_vcpu * vcpu,u64 ingpa,u64 outgpa,bool ex,bool fast)1727 static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
1728 bool ex, bool fast)
1729 {
1730 struct kvm *kvm = vcpu->kvm;
1731 struct hv_send_ipi_ex send_ipi_ex;
1732 struct hv_send_ipi send_ipi;
1733 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1734 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1735 unsigned long *vcpu_mask;
1736 unsigned long valid_bank_mask;
1737 u64 sparse_banks[64];
1738 int sparse_banks_len;
1739 u32 vector;
1740 bool all_cpus;
1741
1742 if (!ex) {
1743 if (!fast) {
1744 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
1745 sizeof(send_ipi))))
1746 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1747 sparse_banks[0] = send_ipi.cpu_mask;
1748 vector = send_ipi.vector;
1749 } else {
1750 /* 'reserved' part of hv_send_ipi should be 0 */
1751 if (unlikely(ingpa >> 32 != 0))
1752 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1753 sparse_banks[0] = outgpa;
1754 vector = (u32)ingpa;
1755 }
1756 all_cpus = false;
1757 valid_bank_mask = BIT_ULL(0);
1758
1759 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1760 } else {
1761 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
1762 sizeof(send_ipi_ex))))
1763 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1764
1765 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1766 send_ipi_ex.vp_set.format,
1767 send_ipi_ex.vp_set.valid_bank_mask);
1768
1769 vector = send_ipi_ex.vector;
1770 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1771 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1772 sizeof(sparse_banks[0]);
1773
1774 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1775
1776 if (!sparse_banks_len)
1777 goto ret_success;
1778
1779 if (!all_cpus &&
1780 kvm_read_guest(kvm,
1781 ingpa + offsetof(struct hv_send_ipi_ex,
1782 vp_set.bank_contents),
1783 sparse_banks,
1784 sparse_banks_len))
1785 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1786 }
1787
1788 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1789 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1790
1791 vcpu_mask = all_cpus ? NULL :
1792 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1793 vp_bitmap, vcpu_bitmap);
1794
1795 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1796
1797 ret_success:
1798 return HV_STATUS_SUCCESS;
1799 }
1800
kvm_hv_set_cpuid(struct kvm_vcpu * vcpu)1801 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
1802 {
1803 struct kvm_cpuid_entry2 *entry;
1804
1805 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
1806 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
1807 vcpu->arch.hyperv_enabled = true;
1808 else
1809 vcpu->arch.hyperv_enabled = false;
1810 }
1811
kvm_hv_hypercall_enabled(struct kvm_vcpu * vcpu)1812 bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
1813 {
1814 return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
1815 }
1816
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

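/*
 * HvSignalEvent: the single parameter packs a connection ID in bits 0-31
 * and a "flag number" in bits 32-47.  For a slow call the parameter is read
 * from guest memory at 'param' (which must then be naturally aligned and
 * must not cross a page boundary); for a fast call it is the register value
 * itself.  A matching eventfd, if any, is looked up by connection ID.
 */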
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;

	if (unlikely(!fast)) {
		int ret;
		gpa_t gpa = param;

		if ((gpa & (__alignof__(param) - 1)) ||
		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (param & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (param & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, param);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
	uint16_t code, rep_idx, rep_cnt;
	bool fast, rep;

	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when issued from
	 * non-zero CPL or from real mode.
	 */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

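	/*
	 * Hypercall inputs per the TLFS calling convention: in 64-bit mode
	 * the control word, input GPA and output GPA arrive in RCX, RDX and
	 * R8; in 32-bit mode each of them is split across a register pair
	 * (EDX:EAX, EBX:ECX and EDI:ESI respectively).
	 */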
#ifdef CONFIG_X86_64
	if (is_64_bit_mode(vcpu)) {
		param = kvm_rcx_read(vcpu);
		ingpa = kvm_rdx_read(vcpu);
		outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		param = ((u64)kvm_rdx_read(vcpu) << 32) |
			(kvm_rax_read(vcpu) & 0xffffffff);
		ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			(kvm_rcx_read(vcpu) & 0xffffffff);
		outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			 (kvm_rsi_read(vcpu) & 0xffffffff);
	}

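	/*
	 * Decode the hypercall control word: call code in bits 0-15, the
	 * "fast" flag in bit 16, rep count in bits 43:32 and rep start index
	 * in bits 59:48.  A hypercall is a rep hypercall iff either rep
	 * field is non-zero.
	 */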
	code = param & 0xffff;
	fast = !!(param & HV_HYPERCALL_FAST_BIT);
	rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	rep = !!(rep_cnt || rep_idx);

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
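	/*
	 * The ADDRESS_LIST flushes are rep hypercalls (a non-zero rep count
	 * with a zero start index), the ADDRESS_SPACE ones are not.  "Fast"
	 * variants would pass their input in XMM registers, which isn't
	 * supported here, so 'fast' is rejected for all four.
	 */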
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
		break;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	}
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	return kvm_hv_hypercall_complete(vcpu, ret);
}

void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}

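/*
 * Bind an eventfd to a connection ID.  idr_alloc() with the range
 * [conn_id, conn_id + 1) either claims exactly that ID or fails with
 * -ENOSPC when the slot is already taken, which is reported as -EEXIST.
 */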
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

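/*
 * Dispatcher for the KVM_HYPERV_EVENTFD vm ioctl.  A minimal userspace
 * sketch (assuming 'vm_fd' is an open VM descriptor and 'efd' an eventfd;
 * the connection ID below is an arbitrary example):
 *
 *	struct kvm_hyperv_eventfd args = {
 *		.conn_id = 0x1234,
 *		.fd	 = efd,
 *		.flags	 = 0,	// KVM_HYPERV_EVENTFD_DEASSIGN to unbind
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &args);
 *
 * Afterwards, a guest HvSignalEvent hypercall with that connection ID
 * signals 'efd' instead of exiting to userspace.
 */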
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}

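/*
 * Backs the KVM_GET_SUPPORTED_HV_CPUID ioctl: fills 'entries' with the
 * Hyper-V CPUID leaves KVM can emulate, for userspace to filter and feed
 * back via KVM_SET_CPUID2.
 */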
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	/*
	 * Skip NESTED_FEATURES if eVMCS is not supported; it is the last
	 * entry in cpuid_entries[], so decrementing nent drops exactly
	 * that leaf.
	 */
	if (!evmcs_ver)
		--nent;

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions, so
			 * advertise that release: build 14393 (0x3839),
			 * major.minor version 10.0.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;

			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

			/*
			 * Direct synthetic timers only make sense with
			 * an in-kernel LAPIC.
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;

			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}