// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

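/*
 * Worked example (illustrative, not part of the original source): on a
 * 32-bit build, mod_64(7000000000ULL, 3000000000ULL) expands to
 * 7000000000 - 3000000000 * div64_u64(7000000000, 3000000000)
 * = 7000000000 - 3000000000 * 2 = 1000000000, the same result the native
 * '%' operator produces on 64-bit builds, without emitting the 64-bit
 * modulo libcall that the kernel cannot rely on for 32-bit x86.
 */
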
/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following define is not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

/*
 * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
 * tuning. When enabled, KVM programs the host timer event to fire early, i.e.
 * before the deadline expires, to account for the delay between taking the
 * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
 * the guest, i.e. so that the interrupt arrives in the guest with minimal
 * latency relative to the deadline programmed by the guest.
 */
static bool lapic_timer_advance __read_mostly = true;
module_param(lapic_timer_advance, bool, 0444);

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8
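
/*
 * Worked example (illustrative): if the timer fires 800 guest TSC cycles
 * early on a vCPU with virtual_tsc_khz = 2000000 (2 GHz), the error is
 * 800 * 1000000 / 2000000 = 400 ns, and timer_advance_ns is lowered by
 * 400 / LAPIC_TIMER_ADVANCE_ADJUST_STEP = 50 ns. Deltas outside the
 * [ADJUST_MIN, ADJUST_MAX] cycle window are ignored as noise (see
 * adjust_lapic_timer_advance() below).
 */
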
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
        *((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
        __kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
        BUILD_BUG_ON(reg != APIC_ICR);
        return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
        return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
        BUILD_BUG_ON(reg != APIC_ICR);
        *((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
                                                int reg, u64 val)
{
        __kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
               apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
               (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops.set_hv_timer
               && !(kvm_mwait_in_guest(vcpu->kvm) ||
                    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}

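/*
 * Worked example (illustrative): for x2APIC ID 0x25, the cluster is
 * 0x25 >> 4 = 0x2 and the in-cluster bit is 1 << (0x25 & 0xf) = 1 << 5,
 * so kvm_apic_calc_x2apic_ldr() yields 0x00020020: cluster 2, member 5.
 */
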
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
        switch (map->logical_mode) {
        case KVM_APIC_MODE_SW_DISABLED:
                /* Arbitrarily use the flat map so that @cluster isn't NULL. */
                *cluster = map->xapic_flat_map;
                *mask = 0;
                return true;
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
        }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        case KVM_APIC_MODE_MAP_DISABLED:
                return false;
        default:
                WARN_ON_ONCE(1);
                return false;
        }
}

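/*
 * Worked example (illustrative): decoding the x2APIC LDR 0x00020020 from
 * the example above, offset = (0x20020 >> 16) * 16 = 32 selects the third
 * 16-entry cluster in phys_map, and the returned mask is
 * 0x20020 & 0xffff = 0x0020, i.e. member 5 of that cluster.
 */
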
static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
                                    struct kvm_vcpu *vcpu,
                                    bool *xapic_id_mismatch)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 x2apic_id = kvm_x2apic_id(apic);
        u32 xapic_id = kvm_xapic_id(apic);
        u32 physical_id;

        /*
         * For simplicity, KVM always allocates enough space for all possible
         * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
         * without the optimized map.
         */
        if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
                return -EINVAL;

        /*
         * Bail if a vCPU was added and/or enabled its APIC between allocating
         * the map and doing the actual calculations for the map. Note, KVM
         * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
         * the compiler decides to reload x2apic_id after this check.
         */
        if (x2apic_id > new->max_apic_id)
                return -E2BIG;

        /*
         * Deliberately truncate the vCPU ID when detecting a mismatched APIC
         * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
         * 32-bit value. Any unwanted aliasing due to truncation results will
         * be detected below.
         */
        if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
                *xapic_id_mismatch = true;

        /*
         * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
         * Allow sending events to vCPUs by their x2APIC ID even if the target
         * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
         * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
         * and collide).
         *
         * Honor the architectural (and KVM's non-optimized) behavior if
         * userspace has not enabled 32-bit x2APIC IDs. Each APIC is supposed
         * to process messages independently. If multiple vCPUs have the same
         * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
         * manually modified its xAPIC IDs, events targeting that ID are
         * supposed to be recognized by all vCPUs with said ID.
         */
        if (vcpu->kvm->arch.x2apic_format) {
                /* See also kvm_apic_match_physical_addr(). */
                if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
                        new->phys_map[x2apic_id] = apic;

                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;
        } else {
                /*
                 * Disable the optimized map if the physical APIC ID is already
                 * mapped, i.e. is aliased to multiple vCPUs. The optimized
                 * map requires a strict 1:1 mapping between IDs and vCPUs.
                 */
                if (apic_x2apic_mode(apic))
                        physical_id = x2apic_id;
                else
                        physical_id = xapic_id;

                if (new->phys_map[physical_id])
                        return -EINVAL;

                new->phys_map[physical_id] = apic;
        }

        return 0;
}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
                                        struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        enum kvm_apic_logical_mode logical_mode;
        struct kvm_lapic **cluster;
        u16 mask;
        u32 ldr;

        if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
                return;

        if (!kvm_apic_sw_enabled(apic))
                return;

        ldr = kvm_lapic_get_reg(apic, APIC_LDR);
        if (!ldr)
                return;

        if (apic_x2apic_mode(apic)) {
                logical_mode = KVM_APIC_MODE_X2APIC;
        } else {
                ldr = GET_APIC_LOGICAL_ID(ldr);
                if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                        logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
                else
                        logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
        }

        /*
         * To optimize logical mode delivery, all software-enabled APICs must
         * be configured for the same mode.
         */
        if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
                new->logical_mode = logical_mode;
        } else if (new->logical_mode != logical_mode) {
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
                return;
        }

        /*
         * In x2APIC mode, the LDR is read-only and derived directly from the
         * x2APIC ID, thus is guaranteed to be addressable. KVM reuses
         * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
         * reversing the LDR calculation to get the cluster of APICs, i.e. no
         * additional work is required.
         */
        if (apic_x2apic_mode(apic)) {
                WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
                return;
        }

        if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
                                                        &cluster, &mask))) {
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
                return;
        }

        if (!mask)
                return;

        ldr = ffs(mask) - 1;
        if (!is_power_of_2(mask) || cluster[ldr])
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
        else
                cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock held.
 */
enum {
        CLEAN,
        UPDATE_IN_PROGRESS,
        DIRTY
};

void kvm_recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        u32 max_id = 255; /* enough space for any xAPIC ID */
        bool xapic_id_mismatch;
        int r;

        /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
        if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;

        WARN_ONCE(!irqchip_in_kernel(kvm),
                  "Dirty APIC map without an in-kernel local APIC");

        mutex_lock(&kvm->arch.apic_map_lock);

retry:
        /*
         * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
         * or the APIC registers (if dirty). Note, on retry the map may have
         * not yet been marked dirty by whatever task changed a vCPU's x2APIC
         * ID, i.e. the map may still show up as in-progress. In that case
         * this task still needs to retry and complete its calculation.
         */
        if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
                                   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
        }

        /*
         * Reset the mismatch flag between attempts so that KVM does the right
         * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
         * keep max_id strictly increasing. Disallowing max_id from shrinking
         * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
         * with the highest x2APIC ID is toggling its APIC on and off.
         */
        xapic_id_mismatch = false;

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                       sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                       GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;
        new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
                if (r) {
                        kvfree(new);
                        new = NULL;
                        if (r == -E2BIG) {
                                cond_resched();
                                goto retry;
                        }

                        goto out;
                }

                kvm_recalculate_logical_map(new, vcpu);
        }
out:
        /*
         * The optimized map is effectively KVM's internal version of APICv,
         * and all unwanted aliasing that results in disabling the optimized
         * map also applies to APICv.
         */
        if (!new)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

        if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

        if (xapic_id_mismatch)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

        old = rcu_dereference_protected(kvm->arch.apic_map,
                                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
         * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
         * If another update has come in, leave it DIRTY.
         */
        atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
                               UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_branch_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_branch_inc(&apic_sw_disabled.key);

                atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }

        /* Check if there are APF page ready requests pending */
        if (enabled) {
                kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
                kvm_xen_sw_enable_lapic(apic->vcpu);
        }
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
        kvm_lapic_set_reg(apic, APIC_DFR, val);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
        return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
        return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 v = 0;

        if (!lapic_in_kernel(vcpu))
                return;

        v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register. Some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast in
         * the LAPIC without first checking the IOAPIC version, in which case
         * level-triggered interrupts never get EOIed in the IOAPIC.
         */
        if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
        int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;
        int i;

        if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
                return;

        /* Initialize/mask any "new" LVT entries. */
        for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
                kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

        apic->nr_lvt_entries = nr_lvt_entries;

        /* The number of LVT entries is reflected in the version register. */
        kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
        [LVT_TIMER] = LVT_MASK,	/* timer mode mask added at runtime */
        [LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
        [LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
        [LVT_LINT0] = LINT_MASK,
        [LVT_LINT1] = LINT_MASK,
        [LVT_ERROR] = LVT_MASK,
        [LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

                irr_val = *p_irr;
                pir_val = READ_ONCE(pir[i]);

                if (pir_val) {
                        pir_val = xchg(&pir[i], 0);

                        prev_irr_val = irr_val;
                        do {
                                irr_val = prev_irr_val | pir_val;
                        } while (prev_irr_val != irr_val &&
                                 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

                        if (prev_irr_val != irr_val)
                                max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

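/*
 * Worked example (illustrative) for __kvm_apic_update_irr(): if pir[0]
 * holds 0x8 and the guest's first IRR word already holds 0x1, the xchg()
 * claims 0x8 from the PIR, the cmpxchg loop publishes 0x1 | 0x8 = 0x9,
 * max_updated_irr = __fls(0x9 ^ 0x1) + 0 = 3, and, assuming no higher
 * word has a bit set, *max_irr is also 3, so the function returns true
 * (the freshly posted vector is the new highest-priority IRR bit).
 */
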
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

        if (unlikely(!apic->apicv_active && irr_updated))
                apic->irr_pending = true;
        return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint. It will always be true with
         * virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        if (unlikely(apic->apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                kvm_x86_call(hwapic_irr_update)(apic->vcpu,
                                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
        apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood. Instead
         * just set SVI.
         */
        if (unlikely(apic->apicv_active))
                kvm_x86_call(hwapic_isr_update)(vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * The ISR (In-Service Register) bit is set when the interrupt
                 * is injected, and only the highest-priority vector is
                 * injected, so the most recently set bit is also the highest
                 * bit in the ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment. In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(apic->apicv_active))
                kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and the
         * value returned may be stale, but kvm_vcpu_kick() in
         * __apic_accept_irq() will cause an immediate vmexit and the value
         * will be recalculated on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                                 irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
                         struct kvm_lapic_irq *irq, u32 min)
{
        int i, count = 0;
        struct kvm_vcpu *vcpu;

        if (min > map->max_apic_id)
                return 0;

        for_each_set_bit(i, ipi_bitmap,
                         min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, irq, NULL);
                }
        }

        return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        struct kvm_apic_map *map;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count;

        if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
                return -KVM_EINVAL;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        count = -EOPNOTSUPP;
        if (likely(map)) {
                count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
                min += cluster_size;
                count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
        }

        rcu_read_unlock();
        return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
                return;

        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0)
                return false;

        val &= KVM_PV_EOI_ENABLED;

        if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
                return false;

        /*
         * Clear the pending bit in any case: it will be set again on vmentry.
         * While this might not be ideal from a performance point of view,
         * it makes sure PV EOI is only enabled when we know it's safe.
         */
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

        return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (kvm_x86_ops.sync_pir_to_irr)
                highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}

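/*
 * Worked example (illustrative): with TPR = 0x30 and vector 0x51 in
 * service, isrv & 0xf0 = 0x50 > 0x30, so PPR becomes 0x50 and pending
 * vectors up to 0x5f stay blocked until EOI; with TPR = 0x60 instead,
 * PPR would be the full TPR value 0x60.
 */
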
static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                       X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        /*
         * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
         * were in x2APIC mode if the target APIC ID can't be encoded as an
         * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
         * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
         * mode. Match the x2APIC ID if and only if the target APIC ID can't
         * be encoded in xAPIC to avoid spurious matches against a vCPU that
         * changed its (addressable) xAPIC ID (which is writable).
         */
        if (apic_x2apic_mode(apic) || mda > 0xff)
                return mda == kvm_x2apic_id(apic);

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                return false;
        }
}

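/*
 * Worked example (illustrative): in xAPIC cluster mode, LDR logical ID
 * 0x24 (cluster 2, member bit 0x4) matches MDA 0x26 because the cluster
 * nibbles agree (2 == 2) and 0x4 & 0x6 != 0; it does not match MDA 0x21,
 * whose member bits share nothing with 0x4.
 */
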
/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                        struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                         int shorthand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        ASSERT(target);
        switch (shorthand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                        const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}

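/*
 * Worked example (illustrative): for vector 0x31 (49) and three
 * destinations, kvm_vector_to_index() computes 49 % 3 = 1 and walks to
 * the second set bit in the bitmap, so vectors are spread across the
 * lowest-priority candidates in a stable, vector-keyed pattern.
 */
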
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                pr_info("Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                     map->logical_mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped. In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                           (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                       (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                             bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                if (KVM_BUG_ON(!src, kvm)) {
                        *r = 0;
                        return true;
                }
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                                  struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
            hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                fallthrough;
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector,
                                                     apic->regs + APIC_TMR);
                        else
                                kvm_lapic_clear_vector(vector,
                                                       apic->regs + APIC_TMR);
                }

                kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
                                                trig_mode, vector);
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                if (!kvm_inject_smi(vcpu)) {
                        kvm_vcpu_kick(vcpu);
                        result = 1;
                }
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_STARTUP:
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap or it traverses to
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap)
{
        struct kvm_lapic **dest_vcpu = NULL;
        struct kvm_lapic *src = NULL;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        unsigned long bitmap, i;
        int vcpu_idx;
        bool ret;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
                                          &bitmap);
        if (ret) {
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dest_vcpu[i])
                                continue;
                        vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
                        __set_bit(vcpu_idx, vcpu_bitmap);
                }
        } else {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!kvm_apic_present(vcpu))
                                continue;
                        if (!kvm_apic_match_dest(vcpu, NULL,
                                                 irq->shorthand,
                                                 irq->dest_id,
                                                 irq->dest_mode))
                                continue;
                        __set_bit(i, vcpu_bitmap);
                }
        }
        rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every write to the EOI register has a corresponding bit set in
         * the ISR; one example is when the kernel checks the timer during
         * setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (kvm_hv_synic_has_vector(apic->vcpu, vector))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
        struct kvm_lapic_irq irq;

        /* KVM has no delay and should always clear the BUSY/PENDING flag. */
        WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
            apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        return div64_u64(ns, (apic->vcpu->kvm->arch.apic_bus_cycle_ns *
                              apic->divide_count));
}

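/*
 * Worked example (illustrative, assuming apic_bus_cycle_ns = 1 and a
 * remaining time shorter than the timer period): with a divide count of
 * 2 and 2000 ns remaining until target_expiration, apic_get_tmcct()
 * returns 2000 / (1 * 2) = 1000 timer ticks.
 */
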
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                break;

        case APIC_TMCCT: /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                fallthrough;
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
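
/*
 * Worked example (illustrative): APIC_REG_MASK(APIC_ICR) is
 * 1ull << (0x300 >> 4), i.e. bit 48, and APIC_REGS_MASK(APIC_ISR, 8)
 * multiplies bit 16 by 0xff to cover the eight 16-byte ISR registers at
 * offsets 0x100-0x170 (bits 16-23).
 */
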

u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
{
        /* Leave bits '0' for reserved and write-only registers. */
        u64 valid_reg_mask =
                APIC_REG_MASK(APIC_ID) |
                APIC_REG_MASK(APIC_LVR) |
                APIC_REG_MASK(APIC_TASKPRI) |
                APIC_REG_MASK(APIC_PROCPRI) |
                APIC_REG_MASK(APIC_LDR) |
                APIC_REG_MASK(APIC_SPIV) |
                APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
                APIC_REG_MASK(APIC_ESR) |
                APIC_REG_MASK(APIC_ICR) |
                APIC_REG_MASK(APIC_LVTT) |
                APIC_REG_MASK(APIC_LVTTHMR) |
                APIC_REG_MASK(APIC_LVTPC) |
                APIC_REG_MASK(APIC_LVT0) |
                APIC_REG_MASK(APIC_LVT1) |
                APIC_REG_MASK(APIC_LVTERR) |
                APIC_REG_MASK(APIC_TMICT) |
                APIC_REG_MASK(APIC_TMCCT) |
                APIC_REG_MASK(APIC_TDCR);

        if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
                valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

        /* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
                                  APIC_REG_MASK(APIC_DFR) |
                                  APIC_REG_MASK(APIC_ICR2);

        return valid_reg_mask;
}
EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                              void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;

        /*
         * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
         * x2APIC and needs to be manually handled by the caller.
         */
        WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);

        if (alignment + len > 4)
                return 1;

        if (offset > 0x3f0 ||
            !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
                return 1;

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return addr >= apic->base_address &&
               addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                          gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                memset(data, 0xff, len);
                return 0;
        }

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);
}

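/*
 * Worked example (illustrative): TDCR = 0x0 gives ((0 | 0) + 1) & 7 = 1,
 * i.e. divide by 2 (the architectural reset value), while TDCR = 0xb
 * gives ((3 | 4) + 1) & 7 = 0, i.e. divide by 1.
 */
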
static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
        /*
         * Do not allow the guest to program periodic timers with a small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_once(
                            "vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }
}
1755
1756 static void cancel_hv_timer(struct kvm_lapic *apic);
1757
1758 static void cancel_apic_timer(struct kvm_lapic *apic)
1759 {
1760 hrtimer_cancel(&apic->lapic_timer.timer);
1761 preempt_disable();
1762 if (apic->lapic_timer.hv_timer_in_use)
1763 cancel_hv_timer(apic);
1764 preempt_enable();
1765 atomic_set(&apic->lapic_timer.pending, 0);
1766 }
1767
1768 static void apic_update_lvtt(struct kvm_lapic *apic)
1769 {
1770 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1771 apic->lapic_timer.timer_mode_mask;
1772
1773 if (apic->lapic_timer.timer_mode != timer_mode) {
1774 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1775 APIC_LVT_TIMER_TSCDEADLINE)) {
1776 cancel_apic_timer(apic);
1777 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1778 apic->lapic_timer.period = 0;
1779 apic->lapic_timer.tscdeadline = 0;
1780 }
1781 apic->lapic_timer.timer_mode = timer_mode;
1782 limit_periodic_timer_frequency(apic);
1783 }
1784 }
1785
1786 /*
1787 * On APICv, this test will cause a busy wait
1788 * during a higher-priority task.
1789 */
1790
1791 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1792 {
1793 struct kvm_lapic *apic = vcpu->arch.apic;
1794 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1795
1796 if (kvm_apic_hw_enabled(apic)) {
1797 int vec = reg & APIC_VECTOR_MASK;
1798 void *bitmap = apic->regs + APIC_ISR;
1799
1800 if (apic->apicv_active)
1801 bitmap = apic->regs + APIC_IRR;
1802
1803 if (apic_test_vector(vec, bitmap))
1804 return true;
1805 }
1806 return false;
1807 }
1808
1809 static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1810 {
1811 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1812
1813 /*
1814 * If the guest TSC is running at a different ratio than the host, then
1815 * convert the delay to nanoseconds to achieve an accurate delay. Note
1816 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1817 * always for VMX-enabled hardware.
1818 */
1819 if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1820 __delay(min(guest_cycles,
1821 nsec_to_cycles(vcpu, timer_advance_ns)));
1822 } else {
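/*
 * Convert guest TSC cycles to nanoseconds: the guest TSC runs at
 * virtual_tsc_khz * 1000 cycles per second, so
 * ns = cycles * 10^6 / virtual_tsc_khz.
 */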
1823 u64 delay_ns = guest_cycles * 1000000ULL;
1824 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1825 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1826 }
1827 }
1828
1829 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1830 s64 advance_expire_delta)
1831 {
1832 struct kvm_lapic *apic = vcpu->arch.apic;
1833 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1834 u64 ns;
1835
1836 /* Do not adjust for tiny fluctuations or large random spikes. */
1837 if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1838 abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1839 return;
1840
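/*
 * Step the advancement by a fraction (1/LAPIC_TIMER_ADVANCE_ADJUST_STEP,
 * i.e. 1/8th) of the observed error to dampen oscillation, e.g. an
 * interrupt that arrived 800ns past the deadline nudges the advancement
 * up by ~100ns. The delta is in guest TSC cycles and is converted to
 * nanoseconds before being applied.
 */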
1841 /* too early */
1842 if (advance_expire_delta < 0) {
1843 ns = -advance_expire_delta * 1000000ULL;
1844 do_div(ns, vcpu->arch.virtual_tsc_khz);
1845 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1846 } else {
1847 /* too late */
1848 ns = advance_expire_delta * 1000000ULL;
1849 do_div(ns, vcpu->arch.virtual_tsc_khz);
1850 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1851 }
1852
1853 if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1854 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1855 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1856 }
1857
1858 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1859 {
1860 struct kvm_lapic *apic = vcpu->arch.apic;
1861 u64 guest_tsc, tsc_deadline;
1862
1863 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1864 apic->lapic_timer.expired_tscdeadline = 0;
1865 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1866 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1867
1868 adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1869
1870 /*
1871 * If the timer fired early, reread the TSC to account for the overhead
1872 * of the above adjustment to avoid waiting longer than is necessary.
1873 */
1874 if (guest_tsc < tsc_deadline)
1875 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1876
1877 if (guest_tsc < tsc_deadline)
1878 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1879 }
1880
1881 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1882 {
1883 if (lapic_in_kernel(vcpu) &&
1884 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1885 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1886 lapic_timer_int_injected(vcpu))
1887 __kvm_wait_lapic_expire(vcpu);
1888 }
1889 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1890
1891 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1892 {
1893 struct kvm_timer *ktimer = &apic->lapic_timer;
1894
1895 kvm_apic_local_deliver(apic, APIC_LVTT);
1896 if (apic_lvtt_tscdeadline(apic)) {
1897 ktimer->tscdeadline = 0;
1898 } else if (apic_lvtt_oneshot(apic)) {
1899 ktimer->tscdeadline = 0;
1900 ktimer->target_expiration = 0;
1901 }
1902 }
1903
1904 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1905 {
1906 struct kvm_vcpu *vcpu = apic->vcpu;
1907 struct kvm_timer *ktimer = &apic->lapic_timer;
1908
1909 if (atomic_read(&apic->lapic_timer.pending))
1910 return;
1911
1912 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1913 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1914
1915 if (!from_timer_fn && apic->apicv_active) {
1916 WARN_ON(kvm_get_running_vcpu() != vcpu);
1917 kvm_apic_inject_pending_timer_irqs(apic);
1918 return;
1919 }
1920
1921 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1922 /*
1923 * Ensure the guest's timer has truly expired before posting an
1924 * interrupt. Open code the relevant checks to avoid querying
1925 * lapic_timer_int_injected(), which will be false since the
1926 * interrupt isn't yet injected. Waiting until after injecting
1927 * is not an option since that won't help a posted interrupt.
1928 */
1929 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1930 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1931 __kvm_wait_lapic_expire(vcpu);
1932 kvm_apic_inject_pending_timer_irqs(apic);
1933 return;
1934 }
1935
1936 atomic_inc(&apic->lapic_timer.pending);
1937 kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1938 if (from_timer_fn)
1939 kvm_vcpu_kick(vcpu);
1940 }
1941
1942 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1943 {
1944 struct kvm_timer *ktimer = &apic->lapic_timer;
1945 u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1946 u64 ns = 0;
1947 ktime_t expire;
1948 struct kvm_vcpu *vcpu = apic->vcpu;
1949 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1950 unsigned long flags;
1951 ktime_t now;
1952
1953 if (unlikely(!tscdeadline || !this_tsc_khz))
1954 return;
1955
1956 local_irq_save(flags);
1957
1958 now = ktime_get();
1959 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1960
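/* Nanoseconds until the deadline: (deadline - now) * 10^6 / tsc_khz. */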
1961 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1962 do_div(ns, this_tsc_khz);
1963
1964 if (likely(tscdeadline > guest_tsc) &&
1965 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1966 expire = ktime_add_ns(now, ns);
1967 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1968 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1969 } else
1970 apic_timer_expired(apic, false);
1971
1972 local_irq_restore(flags);
1973 }
1974
1975 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1976 {
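/*
 * One timer tick consumes (apic_bus_cycle_ns * divide_count)
 * nanoseconds, so the programmed period is the initial count scaled
 * by the APIC bus cycle time and the divide configuration.
 */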
1977 return (u64)tmict * apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1978 (u64)apic->divide_count;
1979 }
1980
1981 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1982 {
1983 ktime_t now, remaining;
1984 u64 ns_remaining_old, ns_remaining_new;
1985
1986 apic->lapic_timer.period =
1987 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1988 limit_periodic_timer_frequency(apic);
1989
1990 now = ktime_get();
1991 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1992 if (ktime_to_ns(remaining) < 0)
1993 remaining = 0;
1994
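/*
 * Rescale the remaining wall-clock time by new/old divide count: the
 * timer now ticks at a different rate, so the same number of remaining
 * ticks takes proportionally more or less real time.
 */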
1995 ns_remaining_old = ktime_to_ns(remaining);
1996 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1997 apic->divide_count, old_divisor);
1998
1999 apic->lapic_timer.tscdeadline +=
2000 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
2001 nsec_to_cycles(apic->vcpu, ns_remaining_old);
2002 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
2003 }
2004
2005 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
2006 {
2007 ktime_t now;
2008 u64 tscl = rdtsc();
2009 s64 deadline;
2010
2011 now = ktime_get();
2012 apic->lapic_timer.period =
2013 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2014
2015 if (!apic->lapic_timer.period) {
2016 apic->lapic_timer.tscdeadline = 0;
2017 return false;
2018 }
2019
2020 limit_periodic_timer_frequency(apic);
2021 deadline = apic->lapic_timer.period;
2022
2023 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2024 if (unlikely(count_reg != APIC_TMICT)) {
2025 deadline = tmict_to_ns(apic,
2026 kvm_lapic_get_reg(apic, count_reg));
2027 if (unlikely(deadline <= 0)) {
2028 if (apic_lvtt_period(apic))
2029 deadline = apic->lapic_timer.period;
2030 else
2031 deadline = 0;
2032 }
2033 else if (unlikely(deadline > apic->lapic_timer.period)) {
2034 pr_info_ratelimited(
2035 "vcpu %i: requested lapic timer restore with "
2036 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2037 "Using initial count to start timer.\n",
2038 apic->vcpu->vcpu_id,
2039 count_reg,
2040 kvm_lapic_get_reg(apic, count_reg),
2041 deadline, apic->lapic_timer.period);
2042 kvm_lapic_set_reg(apic, count_reg, 0);
2043 deadline = apic->lapic_timer.period;
2044 }
2045 }
2046 }
2047
2048 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2049 nsec_to_cycles(apic->vcpu, deadline);
2050 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2051
2052 return true;
2053 }
2054
2055 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2056 {
2057 ktime_t now = ktime_get();
2058 u64 tscl = rdtsc();
2059 ktime_t delta;
2060
2061 /*
2062 * Synchronize both deadlines to the same time source or
2063 * differences in the periods (caused by differences in the
2064 * underlying clocks or numerical approximation errors) will
2065 * cause the two to drift apart over time as the errors
2066 * accumulate.
2067 */
2068 apic->lapic_timer.target_expiration =
2069 ktime_add_ns(apic->lapic_timer.target_expiration,
2070 apic->lapic_timer.period);
2071 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2072 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2073 nsec_to_cycles(apic->vcpu, delta);
2074 }
2075
2076 static void start_sw_period(struct kvm_lapic *apic)
2077 {
2078 if (!apic->lapic_timer.period)
2079 return;
2080
2081 if (ktime_after(ktime_get(),
2082 apic->lapic_timer.target_expiration)) {
2083 apic_timer_expired(apic, false);
2084
2085 if (apic_lvtt_oneshot(apic))
2086 return;
2087
2088 advance_periodic_target_expiration(apic);
2089 }
2090
2091 hrtimer_start(&apic->lapic_timer.timer,
2092 apic->lapic_timer.target_expiration,
2093 HRTIMER_MODE_ABS_HARD);
2094 }
2095
2096 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2097 {
2098 if (!lapic_in_kernel(vcpu))
2099 return false;
2100
2101 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2102 }
2103
2104 static void cancel_hv_timer(struct kvm_lapic *apic)
2105 {
2106 WARN_ON(preemptible());
2107 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2108 kvm_x86_call(cancel_hv_timer)(apic->vcpu);
2109 apic->lapic_timer.hv_timer_in_use = false;
2110 }
2111
2112 static bool start_hv_timer(struct kvm_lapic *apic)
2113 {
2114 struct kvm_timer *ktimer = &apic->lapic_timer;
2115 struct kvm_vcpu *vcpu = apic->vcpu;
2116 bool expired;
2117
2118 WARN_ON(preemptible());
2119 if (!kvm_can_use_hv_timer(vcpu))
2120 return false;
2121
2122 if (!ktimer->tscdeadline)
2123 return false;
2124
2125 if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2126 return false;
2127
2128 ktimer->hv_timer_in_use = true;
2129 hrtimer_cancel(&ktimer->timer);
2130
2131 /*
2132 * To simplify handling the periodic timer, leave the hv timer running
2133 * even if the deadline timer has expired, i.e. rely on the resulting
2134 * VM-Exit to recompute the periodic timer's target expiration.
2135 */
2136 if (!apic_lvtt_period(apic)) {
2137 /*
2138 * Cancel the hv timer if the sw timer fired while the hv timer
2139 * was being programmed, or if the hv timer itself expired.
2140 */
2141 if (atomic_read(&ktimer->pending)) {
2142 cancel_hv_timer(apic);
2143 } else if (expired) {
2144 apic_timer_expired(apic, false);
2145 cancel_hv_timer(apic);
2146 }
2147 }
2148
2149 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2150
2151 return true;
2152 }
2153
2154 static void start_sw_timer(struct kvm_lapic *apic)
2155 {
2156 struct kvm_timer *ktimer = &apic->lapic_timer;
2157
2158 WARN_ON(preemptible());
2159 if (apic->lapic_timer.hv_timer_in_use)
2160 cancel_hv_timer(apic);
2161 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2162 return;
2163
2164 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2165 start_sw_period(apic);
2166 else if (apic_lvtt_tscdeadline(apic))
2167 start_sw_tscdeadline(apic);
2168 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2169 }
2170
2171 static void restart_apic_timer(struct kvm_lapic *apic)
2172 {
2173 preempt_disable();
2174
2175 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2176 goto out;
2177
2178 if (!start_hv_timer(apic))
2179 start_sw_timer(apic);
2180 out:
2181 preempt_enable();
2182 }
2183
2184 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2185 {
2186 struct kvm_lapic *apic = vcpu->arch.apic;
2187
2188 preempt_disable();
2189 /* If the preempt notifier has already run, it also called apic_timer_expired */
2190 if (!apic->lapic_timer.hv_timer_in_use)
2191 goto out;
2192 WARN_ON(kvm_vcpu_is_blocking(vcpu));
2193 apic_timer_expired(apic, false);
2194 cancel_hv_timer(apic);
2195
2196 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2197 advance_periodic_target_expiration(apic);
2198 restart_apic_timer(apic);
2199 }
2200 out:
2201 preempt_enable();
2202 }
2203 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2204
2205 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2206 {
2207 restart_apic_timer(vcpu->arch.apic);
2208 }
2209
2210 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2211 {
2212 struct kvm_lapic *apic = vcpu->arch.apic;
2213
2214 preempt_disable();
2215 /* Possibly the TSC deadline timer is not enabled yet */
2216 if (apic->lapic_timer.hv_timer_in_use)
2217 start_sw_timer(apic);
2218 preempt_enable();
2219 }
2220
2221 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2222 {
2223 struct kvm_lapic *apic = vcpu->arch.apic;
2224
2225 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2226 restart_apic_timer(apic);
2227 }
2228
2229 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2230 {
2231 atomic_set(&apic->lapic_timer.pending, 0);
2232
2233 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2234 && !set_target_expiration(apic, count_reg))
2235 return;
2236
2237 restart_apic_timer(apic);
2238 }
2239
2240 static void start_apic_timer(struct kvm_lapic *apic)
2241 {
2242 __start_apic_timer(apic, APIC_TMICT);
2243 }
2244
2245 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2246 {
2247 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2248
2249 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2250 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2251 if (lvt0_in_nmi_mode) {
2252 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2253 } else
2254 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2255 }
2256 }
2257
2258 static int get_lvt_index(u32 reg)
2259 {
2260 if (reg == APIC_LVTCMCI)
2261 return LVT_CMCI;
2262 if (reg < APIC_LVTT || reg > APIC_LVTERR)
2263 return -1;
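/* The LVT registers are contiguous, spaced 0x10 bytes apart. */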
2264 return array_index_nospec(
2265 (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2266 }
2267
2268 static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2269 {
2270 int ret = 0;
2271
2272 trace_kvm_apic_write(reg, val);
2273
2274 switch (reg) {
2275 case APIC_ID: /* Local APIC ID */
2276 if (!apic_x2apic_mode(apic)) {
2277 kvm_apic_set_xapic_id(apic, val >> 24);
2278 } else {
2279 ret = 1;
2280 }
2281 break;
2282
2283 case APIC_TASKPRI:
2284 report_tpr_access(apic, true);
2285 apic_set_tpr(apic, val & 0xff);
2286 break;
2287
2288 case APIC_EOI:
2289 apic_set_eoi(apic);
2290 break;
2291
2292 case APIC_LDR:
2293 if (!apic_x2apic_mode(apic))
2294 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2295 else
2296 ret = 1;
2297 break;
2298
2299 case APIC_DFR:
2300 if (!apic_x2apic_mode(apic))
2301 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2302 else
2303 ret = 1;
2304 break;
2305
2306 case APIC_SPIV: {
2307 u32 mask = 0x3ff;
2308 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2309 mask |= APIC_SPIV_DIRECTED_EOI;
2310 apic_set_spiv(apic, val & mask);
2311 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2312 int i;
2313
2314 for (i = 0; i < apic->nr_lvt_entries; i++) {
2315 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2316 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2317 }
2318 apic_update_lvtt(apic);
2319 atomic_set(&apic->lapic_timer.pending, 0);
2320
2321 }
2322 break;
2323 }
2324 case APIC_ICR:
2325 WARN_ON_ONCE(apic_x2apic_mode(apic));
2326
2327 /* No delay here, so we always clear the pending bit */
2328 val &= ~APIC_ICR_BUSY;
2329 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2330 kvm_lapic_set_reg(apic, APIC_ICR, val);
2331 break;
2332 case APIC_ICR2:
2333 if (apic_x2apic_mode(apic))
2334 ret = 1;
2335 else
2336 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2337 break;
2338
2339 case APIC_LVT0:
2340 apic_manage_nmi_watchdog(apic, val);
2341 fallthrough;
2342 case APIC_LVTTHMR:
2343 case APIC_LVTPC:
2344 case APIC_LVT1:
2345 case APIC_LVTERR:
2346 case APIC_LVTCMCI: {
2347 u32 index = get_lvt_index(reg);
2348 if (!kvm_lapic_lvt_supported(apic, index)) {
2349 ret = 1;
2350 break;
2351 }
2352 if (!kvm_apic_sw_enabled(apic))
2353 val |= APIC_LVT_MASKED;
2354 val &= apic_lvt_mask[index];
2355 kvm_lapic_set_reg(apic, reg, val);
2356 break;
2357 }
2358
2359 case APIC_LVTT:
2360 if (!kvm_apic_sw_enabled(apic))
2361 val |= APIC_LVT_MASKED;
2362 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2363 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2364 apic_update_lvtt(apic);
2365 break;
2366
2367 case APIC_TMICT:
2368 if (apic_lvtt_tscdeadline(apic))
2369 break;
2370
2371 cancel_apic_timer(apic);
2372 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2373 start_apic_timer(apic);
2374 break;
2375
2376 case APIC_TDCR: {
2377 uint32_t old_divisor = apic->divide_count;
2378
2379 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2380 update_divide_count(apic);
2381 if (apic->divide_count != old_divisor &&
2382 apic->lapic_timer.period) {
2383 hrtimer_cancel(&apic->lapic_timer.timer);
2384 update_target_expiration(apic, old_divisor);
2385 restart_apic_timer(apic);
2386 }
2387 break;
2388 }
2389 case APIC_ESR:
2390 if (apic_x2apic_mode(apic) && val != 0)
2391 ret = 1;
2392 break;
2393
2394 case APIC_SELF_IPI:
2395 /*
2396 * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold
2397 * the vector, everything else is reserved.
2398 */
2399 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2400 ret = 1;
2401 else
2402 kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2403 break;
2404 default:
2405 ret = 1;
2406 break;
2407 }
2408
2409 /*
2410 * Recalculate APIC maps if necessary, e.g. if the software enable bit
2411 * was toggled, the APIC ID changed, etc... The maps are marked dirty
2412 * on relevant changes, i.e. this is a nop for most writes.
2413 */
2414 kvm_recalculate_apic_map(apic->vcpu->kvm);
2415
2416 return ret;
2417 }
2418
2419 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2420 gpa_t address, int len, const void *data)
2421 {
2422 struct kvm_lapic *apic = to_lapic(this);
2423 unsigned int offset = address - apic->base_address;
2424 u32 val;
2425
2426 if (!apic_mmio_in_range(apic, address))
2427 return -EOPNOTSUPP;
2428
2429 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2430 if (!kvm_check_has_quirk(vcpu->kvm,
2431 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2432 return -EOPNOTSUPP;
2433
2434 return 0;
2435 }
2436
2437 /*
2438 * APIC registers must be aligned on a 128-bit boundary.
2439 * 32/64/128-bit registers must be accessed through 32-bit operations.
2440 * See SDM 8.4.1.
2441 */
2442 if (len != 4 || (offset & 0xf))
2443 return 0;
2444
2445 val = *(u32*)data;
2446
2447 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2448
2449 return 0;
2450 }
2451
2452 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2453 {
2454 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2455 }
2456 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2457
2458 /* emulate APIC access in a trap manner */
2459 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2460 {
2461 struct kvm_lapic *apic = vcpu->arch.apic;
2462
2463 /*
2464 * ICR is a single 64-bit register when x2APIC is enabled, all others
2465 * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
2466 * go down the common path to get the upper half from ICR2.
2467 *
2468 * Note, using the write helpers may incur an unnecessary write to the
2469 * virtual APIC state, but KVM needs to conditionally modify the value
2470 * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
2471 * conditional branches is likely a wash relative to the cost of the
2472 * maybe-unnecessary write, and both are in the noise anyway.
2473 */
2474 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2475 kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
2476 else
2477 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2478 }
2479 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2480
2481 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2482 {
2483 struct kvm_lapic *apic = vcpu->arch.apic;
2484
2485 if (!vcpu->arch.apic) {
2486 static_branch_dec(&kvm_has_noapic_vcpu);
2487 return;
2488 }
2489
2490 hrtimer_cancel(&apic->lapic_timer.timer);
2491
2492 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2493 static_branch_slow_dec_deferred(&apic_hw_disabled);
2494
2495 if (!apic->sw_enabled)
2496 static_branch_slow_dec_deferred(&apic_sw_disabled);
2497
2498 if (apic->regs)
2499 free_page((unsigned long)apic->regs);
2500
2501 kfree(apic);
2502 }
2503
2504 /*
2505 *----------------------------------------------------------------------
2506 * LAPIC interface
2507 *----------------------------------------------------------------------
2508 */
2509 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2510 {
2511 struct kvm_lapic *apic = vcpu->arch.apic;
2512
2513 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2514 return 0;
2515
2516 return apic->lapic_timer.tscdeadline;
2517 }
2518
2519 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2520 {
2521 struct kvm_lapic *apic = vcpu->arch.apic;
2522
2523 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2524 return;
2525
2526 hrtimer_cancel(&apic->lapic_timer.timer);
2527 apic->lapic_timer.tscdeadline = data;
2528 start_apic_timer(apic);
2529 }
2530
2531 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2532 {
2533 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2534 }
2535
2536 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2537 {
2538 u64 tpr;
2539
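/* CR8 mirrors the task-priority class, i.e. TPR bits 7:4. */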
2540 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2541
2542 return (tpr & 0xf0) >> 4;
2543 }
2544
2545 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2546 {
2547 u64 old_value = vcpu->arch.apic_base;
2548 struct kvm_lapic *apic = vcpu->arch.apic;
2549
2550 vcpu->arch.apic_base = value;
2551
2552 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2553 kvm_update_cpuid_runtime(vcpu);
2554
2555 if (!apic)
2556 return;
2557
2558 /* update jump label if enable bit changes */
2559 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2560 if (value & MSR_IA32_APICBASE_ENABLE) {
2561 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2562 static_branch_slow_dec_deferred(&apic_hw_disabled);
2563 /* Check if there are APF page ready requests pending */
2564 kvm_make_request(KVM_REQ_APF_READY, vcpu);
2565 } else {
2566 static_branch_inc(&apic_hw_disabled.key);
2567 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2568 }
2569 }
2570
2571 if ((old_value ^ value) & X2APIC_ENABLE) {
2572 if (value & X2APIC_ENABLE)
2573 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2574 else if (value & MSR_IA32_APICBASE_ENABLE)
2575 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2576 }
2577
2578 if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2579 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2580 kvm_x86_call(set_virtual_apic_mode)(vcpu);
2581 }
2582
2583 apic->base_address = apic->vcpu->arch.apic_base &
2584 MSR_IA32_APICBASE_BASE;
2585
2586 if ((value & MSR_IA32_APICBASE_ENABLE) &&
2587 apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2588 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2589 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2590 }
2591 }
2592
2593 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2594 {
2595 struct kvm_lapic *apic = vcpu->arch.apic;
2596
2597 if (apic->apicv_active) {
2598 /* irr_pending is always true when apicv is activated. */
2599 apic->irr_pending = true;
2600 apic->isr_count = 1;
2601 } else {
2602 /*
2603 * Don't clear irr_pending, searching the IRR can race with
2604 * updates from the CPU as APICv is still active from hardware's
2605 * perspective. The flag will be cleared as appropriate when
2606 * KVM injects the interrupt.
2607 */
2608 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2609 }
2610 apic->highest_isr_cache = -1;
2611 }
2612
2613 int kvm_alloc_apic_access_page(struct kvm *kvm)
2614 {
2615 struct page *page;
2616 void __user *hva;
2617 int ret = 0;
2618
2619 mutex_lock(&kvm->slots_lock);
2620 if (kvm->arch.apic_access_memslot_enabled ||
2621 kvm->arch.apic_access_memslot_inhibited)
2622 goto out;
2623
2624 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2625 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2626 if (IS_ERR(hva)) {
2627 ret = PTR_ERR(hva);
2628 goto out;
2629 }
2630
2631 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
2632 if (is_error_page(page)) {
2633 ret = -EFAULT;
2634 goto out;
2635 }
2636
2637 /*
2638 * Do not pin the page in memory, so that memory hot-unplug
2639 * is able to migrate it.
2640 */
2641 put_page(page);
2642 kvm->arch.apic_access_memslot_enabled = true;
2643 out:
2644 mutex_unlock(&kvm->slots_lock);
2645 return ret;
2646 }
2647 EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
2648
2649 void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
2650 {
2651 struct kvm *kvm = vcpu->kvm;
2652
2653 if (!kvm->arch.apic_access_memslot_enabled)
2654 return;
2655
2656 kvm_vcpu_srcu_read_unlock(vcpu);
2657
2658 mutex_lock(&kvm->slots_lock);
2659
2660 if (kvm->arch.apic_access_memslot_enabled) {
2661 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2662 /*
2663 * Clear "enabled" after the memslot is deleted so that a
2664 * different vCPU doesn't get a false negative when checking
2665 * the flag out of slots_lock. No additional memory barrier is
2666 * needed as modifying memslots requires waiting for other vCPUs to
2667 * drop SRCU (see above), and false positives are ok as the
2668 * flag is rechecked after acquiring slots_lock.
2669 */
2670 kvm->arch.apic_access_memslot_enabled = false;
2671
2672 /*
2673 * Mark the memslot as inhibited to prevent reallocating the
2674 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2675 */
2676 kvm->arch.apic_access_memslot_inhibited = true;
2677 }
2678
2679 mutex_unlock(&kvm->slots_lock);
2680
2681 kvm_vcpu_srcu_read_lock(vcpu);
2682 }
2683
2684 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2685 {
2686 struct kvm_lapic *apic = vcpu->arch.apic;
2687 u64 msr_val;
2688 int i;
2689
2690 kvm_x86_call(apicv_pre_state_restore)(vcpu);
2691
2692 if (!init_event) {
2693 msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2694 if (kvm_vcpu_is_reset_bsp(vcpu))
2695 msr_val |= MSR_IA32_APICBASE_BSP;
2696 kvm_lapic_set_base(vcpu, msr_val);
2697 }
2698
2699 if (!apic)
2700 return;
2701
2702 /* Stop the timer in case it's a reset to an active apic */
2703 hrtimer_cancel(&apic->lapic_timer.timer);
2704
2705 /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2706 if (!init_event)
2707 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2708 kvm_apic_set_version(apic->vcpu);
2709
2710 for (i = 0; i < apic->nr_lvt_entries; i++)
2711 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2712 apic_update_lvtt(apic);
2713 if (kvm_vcpu_is_reset_bsp(vcpu) &&
2714 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2715 kvm_lapic_set_reg(apic, APIC_LVT0,
2716 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2717 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2718
2719 kvm_apic_set_dfr(apic, 0xffffffffU);
2720 apic_set_spiv(apic, 0xff);
2721 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2722 if (!apic_x2apic_mode(apic))
2723 kvm_apic_set_ldr(apic, 0);
2724 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2725 if (!apic_x2apic_mode(apic)) {
2726 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2727 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2728 } else {
2729 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2730 }
2731 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2732 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2733 for (i = 0; i < 8; i++) {
2734 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2735 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2736 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2737 }
2738 kvm_apic_update_apicv(vcpu);
2739 update_divide_count(apic);
2740 atomic_set(&apic->lapic_timer.pending, 0);
2741
2742 vcpu->arch.pv_eoi.msr_val = 0;
2743 apic_update_ppr(apic);
2744 if (apic->apicv_active) {
2745 kvm_x86_call(apicv_post_state_restore)(vcpu);
2746 kvm_x86_call(hwapic_irr_update)(vcpu, -1);
2747 kvm_x86_call(hwapic_isr_update)(-1);
2748 }
2749
2750 vcpu->arch.apic_arb_prio = 0;
2751 vcpu->arch.apic_attention = 0;
2752
2753 kvm_recalculate_apic_map(vcpu->kvm);
2754 }
2755
2756 /*
2757 *----------------------------------------------------------------------
2758 * timer interface
2759 *----------------------------------------------------------------------
2760 */
2761
2762 static bool lapic_is_periodic(struct kvm_lapic *apic)
2763 {
2764 return apic_lvtt_period(apic);
2765 }
2766
2767 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2768 {
2769 struct kvm_lapic *apic = vcpu->arch.apic;
2770
2771 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2772 return atomic_read(&apic->lapic_timer.pending);
2773
2774 return 0;
2775 }
2776
2777 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2778 {
2779 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2780 int vector, mode, trig_mode;
2781 int r;
2782
2783 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2784 vector = reg & APIC_VECTOR_MASK;
2785 mode = reg & APIC_MODE_MASK;
2786 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2787
2788 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2789 if (r && lvt_type == APIC_LVTPC &&
2790 guest_cpuid_is_intel_compatible(apic->vcpu))
2791 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2792 return r;
2793 }
2794 return 0;
2795 }
2796
2797 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2798 {
2799 struct kvm_lapic *apic = vcpu->arch.apic;
2800
2801 if (apic)
2802 kvm_apic_local_deliver(apic, APIC_LVT0);
2803 }
2804
2805 static const struct kvm_io_device_ops apic_mmio_ops = {
2806 .read = apic_mmio_read,
2807 .write = apic_mmio_write,
2808 };
2809
2810 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2811 {
2812 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2813 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2814
2815 apic_timer_expired(apic, true);
2816
2817 if (lapic_is_periodic(apic)) {
2818 advance_periodic_target_expiration(apic);
2819 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2820 return HRTIMER_RESTART;
2821 } else
2822 return HRTIMER_NORESTART;
2823 }
2824
2825 int kvm_create_lapic(struct kvm_vcpu *vcpu)
2826 {
2827 struct kvm_lapic *apic;
2828
2829 ASSERT(vcpu != NULL);
2830
2831 if (!irqchip_in_kernel(vcpu->kvm)) {
2832 static_branch_inc(&kvm_has_noapic_vcpu);
2833 return 0;
2834 }
2835
2836 apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2837 if (!apic)
2838 goto nomem;
2839
2840 vcpu->arch.apic = apic;
2841
2842 if (kvm_x86_ops.alloc_apic_backing_page)
2843 apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
2844 else
2845 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2846 if (!apic->regs) {
2847 printk(KERN_ERR "Failed to allocate APIC register page for vcpu %x\n",
2848 vcpu->vcpu_id);
2849 goto nomem_free_apic;
2850 }
2851 apic->vcpu = vcpu;
2852
2853 apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2854
2855 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2856 HRTIMER_MODE_ABS_HARD);
2857 apic->lapic_timer.timer.function = apic_timer_fn;
2858 if (lapic_timer_advance)
2859 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2860
2861 /*
2862 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2863 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2864 */
2865 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2866 static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2867 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2868
2869 /*
2870 * Defer evaluating inhibits until the vCPU is first run, as this vCPU
2871 * will not get notified of any changes until this vCPU is visible to
2872 * other vCPUs (marked online and added to the set of vCPUs).
2873 *
2874 * Opportunistically mark APICv active, as VMX in particular is highly
2875 * unlikely to have inhibits. Ignore the current per-VM APICv state so
2876 * that vCPU creation is guaranteed to run with a deterministic value,
2877 * the request will ensure the vCPU gets the correct state before VM-Entry.
2878 */
2879 if (enable_apicv) {
2880 apic->apicv_active = true;
2881 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2882 }
2883
2884 return 0;
2885 nomem_free_apic:
2886 kfree(apic);
2887 vcpu->arch.apic = NULL;
2888 nomem:
2889 return -ENOMEM;
2890 }
2891
2892 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2893 {
2894 struct kvm_lapic *apic = vcpu->arch.apic;
2895 u32 ppr;
2896
2897 if (!kvm_apic_present(vcpu))
2898 return -1;
2899
2900 __apic_update_ppr(apic, &ppr);
2901 return apic_has_interrupt_for_ppr(apic, ppr);
2902 }
2903 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2904
2905 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2906 {
2907 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2908
2909 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2910 return 1;
2911 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2912 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2913 return 1;
2914 return 0;
2915 }
2916
2917 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2918 {
2919 struct kvm_lapic *apic = vcpu->arch.apic;
2920
2921 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2922 kvm_apic_inject_pending_timer_irqs(apic);
2923 atomic_set(&apic->lapic_timer.pending, 0);
2924 }
2925 }
2926
2927 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2928 {
2929 int vector = kvm_apic_has_interrupt(vcpu);
2930 struct kvm_lapic *apic = vcpu->arch.apic;
2931 u32 ppr;
2932
2933 if (vector == -1)
2934 return -1;
2935
2936 /*
2937 * We get here even with APIC virtualization enabled, if doing
2938 * nested virtualization and L1 runs with the "acknowledge interrupt
2939 * on exit" mode. Then we cannot inject the interrupt via RVI,
2940 * because the process would deliver it through the IDT.
2941 */
2942
2943 apic_clear_irr(vector, apic);
2944 if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
2945 /*
2946 * For auto-EOI interrupts, there might be another pending
2947 * interrupt above PPR, so check whether to raise another
2948 * KVM_REQ_EVENT.
2949 */
2950 apic_update_ppr(apic);
2951 } else {
2952 /*
2953 * For normal interrupts, PPR has been raised and there cannot
2954 * be a higher-priority pending interrupt---except if there was
2955 * a concurrent interrupt injection, but that would have
2956 * triggered KVM_REQ_EVENT already.
2957 */
2958 apic_set_isr(vector, apic);
2959 __apic_update_ppr(apic, &ppr);
2960 }
2961
2962 return vector;
2963 }
2964
2965 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2966 struct kvm_lapic_state *s, bool set)
2967 {
2968 if (apic_x2apic_mode(vcpu->arch.apic)) {
2969 u32 *id = (u32 *)(s->regs + APIC_ID);
2970 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2971 u64 icr;
2972
2973 if (vcpu->kvm->arch.x2apic_format) {
2974 if (*id != vcpu->vcpu_id)
2975 return -EINVAL;
2976 } else {
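/*
 * Without the x2APIC format extension, userspace sees the APIC ID
 * in xAPIC layout (bits 31:24), so shift between that layout and
 * the full 32-bit x2APIC ID.
 */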
2977 if (set)
2978 *id >>= 24;
2979 else
2980 *id <<= 24;
2981 }
2982
2983 /*
2984 * In x2APIC mode, the LDR is fixed and based on the ID, and the
2985 * ICR is internally a single 64-bit register, but needs to be
2986 * split to ICR+ICR2 in userspace for backwards compatibility.
2987 */
2988 if (set) {
2989 *ldr = kvm_apic_calc_x2apic_ldr(*id);
2990
2991 icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
2992 (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
2993 __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
2994 } else {
2995 icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
2996 __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
2997 }
2998 }
2999
3000 return 0;
3001 }
3002
3003 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3004 {
3005 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
3006
3007 /*
3008 * Compute the timer's current count from the remaining timer period (if
3009 * any) and store it in the returned register set.
3010 */
3011 __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
3012 __apic_read(vcpu->arch.apic, APIC_TMCCT));
3013
3014 return kvm_apic_state_fixup(vcpu, s, false);
3015 }
3016
3017 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3018 {
3019 struct kvm_lapic *apic = vcpu->arch.apic;
3020 int r;
3021
3022 kvm_x86_call(apicv_pre_state_restore)(vcpu);
3023
3024 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
3025 /* set SPIV separately to get count of SW disabled APICs right */
3026 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3027
3028 r = kvm_apic_state_fixup(vcpu, s, true);
3029 if (r) {
3030 kvm_recalculate_apic_map(vcpu->kvm);
3031 return r;
3032 }
3033 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3034
3035 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3036 kvm_recalculate_apic_map(vcpu->kvm);
3037 kvm_apic_set_version(vcpu);
3038
3039 apic_update_ppr(apic);
3040 cancel_apic_timer(apic);
3041 apic->lapic_timer.expired_tscdeadline = 0;
3042 apic_update_lvtt(apic);
3043 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3044 update_divide_count(apic);
3045 __start_apic_timer(apic, APIC_TMCCT);
3046 kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3047 kvm_apic_update_apicv(vcpu);
3048 if (apic->apicv_active) {
3049 kvm_x86_call(apicv_post_state_restore)(vcpu);
3050 kvm_x86_call(hwapic_irr_update)(vcpu,
3051 apic_find_highest_irr(apic));
3052 kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
3053 }
3054 kvm_make_request(KVM_REQ_EVENT, vcpu);
3055 if (ioapic_in_kernel(vcpu->kvm))
3056 kvm_rtc_eoi_tracking_restore_one(vcpu);
3057
3058 vcpu->arch.apic_arb_prio = 0;
3059
3060 return 0;
3061 }
3062
3063 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
3064 {
3065 struct hrtimer *timer;
3066
3067 if (!lapic_in_kernel(vcpu) ||
3068 kvm_can_post_timer_interrupt(vcpu))
3069 return;
3070
3071 timer = &vcpu->arch.apic->lapic_timer.timer;
3072 if (hrtimer_cancel(timer))
3073 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
3074 }
3075
3076 /*
3077 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
3078 *
3079 * Detect whether guest triggered PV EOI since the
3080 * last entry. If yes, set EOI on the guest's behalf.
3081 * Clear PV EOI in guest memory in any case.
3082 */
3083 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
3084 struct kvm_lapic *apic)
3085 {
3086 int vector;
3087 /*
3088 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
3089 * and KVM_PV_EOI_ENABLED in guest memory as follows:
3090 *
3091 * KVM_APIC_PV_EOI_PENDING is unset:
3092 * -> host disabled PV EOI.
3093 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
3094 * -> host enabled PV EOI, guest did not execute EOI yet.
3095 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
3096 * -> host enabled PV EOI, guest executed EOI.
3097 */
3098 BUG_ON(!pv_eoi_enabled(vcpu));
3099
3100 if (pv_eoi_test_and_clr_pending(vcpu))
3101 return;
3102 vector = apic_set_eoi(apic);
3103 trace_kvm_pv_eoi(apic, vector);
3104 }
3105
3106 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
3107 {
3108 u32 data;
3109
3110 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3111 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3112
3113 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3114 return;
3115
3116 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3117 sizeof(u32)))
3118 return;
3119
3120 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3121 }
3122
3123 /*
3124 * apic_sync_pv_eoi_to_guest - called before vmentry
3125 *
3126 * Detect whether it's safe to enable PV EOI and
3127 * if yes do so.
3128 */
3129 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
3130 struct kvm_lapic *apic)
3131 {
3132 if (!pv_eoi_enabled(vcpu) ||
3133 /* IRR set or many bits in ISR: could be nested. */
3134 apic->irr_pending ||
3135 /* Cache not set: could be safe but we don't bother. */
3136 apic->highest_isr_cache == -1 ||
3137 /* Need EOI to update ioapic. */
3138 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3139 /*
3140 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
3141 * so we need not do anything here.
3142 */
3143 return;
3144 }
3145
3146 pv_eoi_set_pending(apic->vcpu);
3147 }
3148
3149 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
3150 {
3151 u32 data, tpr;
3152 int max_irr, max_isr;
3153 struct kvm_lapic *apic = vcpu->arch.apic;
3154
3155 apic_sync_pv_eoi_to_guest(vcpu, apic);
3156
3157 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3158 return;
3159
3160 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3161 max_irr = apic_find_highest_irr(apic);
3162 if (max_irr < 0)
3163 max_irr = 0;
3164 max_isr = apic_find_highest_isr(apic);
3165 if (max_isr < 0)
3166 max_isr = 0;
3167 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
3168
3169 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3170 sizeof(u32));
3171 }
3172
3173 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
3174 {
3175 if (vapic_addr) {
3176 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
3177 &vcpu->arch.apic->vapic_cache,
3178 vapic_addr, sizeof(u32)))
3179 return -EINVAL;
3180 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3181 } else {
3182 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3183 }
3184
3185 vcpu->arch.apic->vapic_addr = vapic_addr;
3186 return 0;
3187 }
3188
3189 int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
3190 {
3191 data &= ~APIC_ICR_BUSY;
3192
3193 kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
3194 kvm_lapic_set_reg64(apic, APIC_ICR, data);
3195 trace_kvm_apic_write(APIC_ICR, data);
3196 return 0;
3197 }
3198
3199 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3200 {
3201 u32 low;
3202
3203 if (reg == APIC_ICR) {
3204 *data = kvm_lapic_get_reg64(apic, APIC_ICR);
3205 return 0;
3206 }
3207
3208 if (kvm_lapic_reg_read(apic, reg, 4, &low))
3209 return 1;
3210
3211 *data = low;
3212
3213 return 0;
3214 }
3215
3216 static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3217 {
3218 /*
3219 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
3220 * can be written as such, all other registers remain accessible only
3221 * through 32-bit reads/writes.
3222 */
3223 if (reg == APIC_ICR)
3224 return kvm_x2apic_icr_write(apic, data);
3225
3226 /* Bits 63:32 are reserved in all other registers. */
3227 if (data >> 32)
3228 return 1;
3229
3230 return kvm_lapic_reg_write(apic, reg, (u32)data);
3231 }
3232
3233 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
3234 {
3235 struct kvm_lapic *apic = vcpu->arch.apic;
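/*
 * x2APIC MSR 0x800 + N maps to the xAPIC register at offset N << 4,
 * which lets the common register read/write helpers be reused.
 */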
3236 u32 reg = (msr - APIC_BASE_MSR) << 4;
3237
3238 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3239 return 1;
3240
3241 return kvm_lapic_msr_write(apic, reg, data);
3242 }
3243
3244 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
3245 {
3246 struct kvm_lapic *apic = vcpu->arch.apic;
3247 u32 reg = (msr - APIC_BASE_MSR) << 4;
3248
3249 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3250 return 1;
3251
3252 return kvm_lapic_msr_read(apic, reg, data);
3253 }
3254
3255 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
3256 {
3257 if (!lapic_in_kernel(vcpu))
3258 return 1;
3259
3260 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3261 }
3262
3263 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
3264 {
3265 if (!lapic_in_kernel(vcpu))
3266 return 1;
3267
3268 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3269 }
3270
3271 int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
3272 {
3273 u64 addr = data & ~KVM_MSR_ENABLED;
3274 struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3275 unsigned long new_len;
3276 int ret;
3277
3278 if (!IS_ALIGNED(addr, 4))
3279 return 1;
3280
3281 if (data & KVM_MSR_ENABLED) {
3282 if (addr == ghc->gpa && len <= ghc->len)
3283 new_len = ghc->len;
3284 else
3285 new_len = len;
3286
3287 ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3288 if (ret)
3289 return ret;
3290 }
3291
3292 vcpu->arch.pv_eoi.msr_val = data;
3293
3294 return 0;
3295 }
3296
3297 int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
3298 {
3299 struct kvm_lapic *apic = vcpu->arch.apic;
3300 u8 sipi_vector;
3301 int r;
3302
3303 if (!kvm_apic_has_pending_init_or_sipi(vcpu))
3304 return 0;
3305
3306 if (is_guest_mode(vcpu)) {
3307 r = kvm_check_nested_events(vcpu);
3308 if (r < 0)
3309 return r == -EBUSY ? 0 : r;
3310 /*
3311 * Continue processing INIT/SIPI even if a nested VM-Exit
3312 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
3313 * are blocked as a result of transitioning to VMX root mode.
3314 */
3315 }
3316
3317 /*
3318 * INITs are blocked while CPU is in specific states (SMM, VMX root
3319 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
3320 * wait-for-SIPI (WFS).
3321 */
3322 if (!kvm_apic_init_sipi_allowed(vcpu)) {
3323 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3324 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3325 return 0;
3326 }
3327
3328 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3329 kvm_vcpu_reset(vcpu, true);
3330 if (kvm_vcpu_is_bsp(apic->vcpu))
3331 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3332 else
3333 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3334 }
3335 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3336 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3337 /* evaluate pending_events before reading the vector */
3338 smp_rmb();
3339 sipi_vector = apic->sipi_vector;
3340 kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
3341 sipi_vector);
3342 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3343 }
3344 }
3345 return 0;
3346 }
3347
3348 void kvm_lapic_exit(void)
3349 {
3350 static_key_deferred_flush(&apic_hw_disabled);
3351 WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
3352 static_key_deferred_flush(&apic_sw_disabled);
3353 WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
3354 }
3355