1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * MMU support
9 *
10 * Copyright (C) 2006 Qumranet, Inc.
11 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12 *
13 * Authors:
14 * Yaniv Kamay <yaniv@qumranet.com>
15 * Avi Kivity <avi@qumranet.com>
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59
60 #include "trace.h"
61
62 static bool nx_hugepage_mitigation_hard_disabled;
63
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 .set = set_nx_huge_pages,
79 .get = get_nx_huge_pages,
80 };
81
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 .set = set_nx_huge_pages_recovery_param,
84 .get = param_get_uint,
85 };
86
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 &nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 &nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
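/*
 * Illustrative usage of the knobs above (not part of the upstream source):
 * the NX hugepage mitigation and its recovery worker can be tuned on the
 * kernel command line, e.g. kvm.nx_huge_pages=off, or at runtime via
 * /sys/module/kvm/parameters/, e.g.
 *
 *   echo 60   > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
 *   echo 1000 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
 *
 * The values shown are examples only; the defaults are set above.
 */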
95
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98
99 /*
100 * When set to true, this enables Two-Dimensional Paging (TDP), where the
101 * hardware walks two page tables:
102 * 1. the guest-virtual to guest-physical translation
103 * 2. while doing 1., the guest-physical to host-physical translation
104 * If the hardware supports TDP, shadow paging is not needed.
105 */
106 bool tdp_enabled = false;
107
108 static bool __ro_after_init tdp_mmu_allowed;
109
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 #endif
114
115 static int max_huge_page_level __read_mostly;
116 static int tdp_root_level __read_mostly;
117 static int max_tdp_level __read_mostly;
118
119 #define PTE_PREFETCH_NUM 8
120
121 #include <trace/events/kvm.h>
122
123 /* make pte_list_desc fit well in cache lines */
124 #define PTE_LIST_EXT 14
125
126 /*
127 * struct pte_list_desc is the core data structure used to implement a custom
128 * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
129 * given GFN when used in the context of rmaps. Using a custom list allows KVM
130 * to optimize for the common case where many GFNs will have at most a handful
131 * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
132 * memory footprint, which in turn improves runtime performance by exploiting
133 * cache locality.
134 *
135 * A list is comprised of one or more pte_list_desc objects (descriptors).
136 * Each individual descriptor stores up to PTE_LIST_EXT SPTEs. If a descriptor
137 * is full and a new SPTE needs to be added, a new descriptor is allocated and
138 * becomes the head of the list. This means that, by definition, all tail
139 * descriptors are full.
140 *
141 * Note, the metadata fields are deliberately placed at the start of the
142 * structure to optimize the cacheline layout; accessing the descriptor will
143 * touch only a single cacheline so long as @spte_count <= 6 (or if only the
144 * descriptor's metadata is accessed).
145 */
146 struct pte_list_desc {
147 struct pte_list_desc *more;
148 /* The number of PTEs stored in _this_ descriptor. */
149 u32 spte_count;
150 /* The number of PTEs stored in all tails of this descriptor. */
151 u32 tail_count;
152 u64 *sptes[PTE_LIST_EXT];
153 };
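/*
 * Quick size check for the layout above (a sketch, assuming 64-bit pointers
 * and 64-byte cachelines): 8 (more) + 4 (spte_count) + 4 (tail_count) +
 * 14 * 8 (sptes) = 128 bytes, i.e. exactly two cachelines.  The first
 * cacheline holds the metadata plus sptes[0..5], which is why accesses stay
 * within a single cacheline as long as spte_count <= 6.
 */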
154
155 struct kvm_shadow_walk_iterator {
156 u64 addr;
157 hpa_t shadow_addr;
158 u64 *sptep;
159 int level;
160 unsigned index;
161 };
162
163 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
164 for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
165 (_root), (_addr)); \
166 shadow_walk_okay(&(_walker)); \
167 shadow_walk_next(&(_walker)))
168
169 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
170 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
171 shadow_walk_okay(&(_walker)); \
172 shadow_walk_next(&(_walker)))
173
174 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
175 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
176 shadow_walk_okay(&(_walker)) && \
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 __shadow_walk_next(&(_walker), spte))
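/*
 * Illustrative (hypothetical) use of the lockless walker above; real callers
 * appear later in this file:
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *	u64 spte;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 *	walk_shadow_page_lockless_end(vcpu);
 */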
179
180 static struct kmem_cache *pte_list_desc_cache;
181 struct kmem_cache *mmu_page_header_cache;
182 static struct percpu_counter kvm_total_used_mmu_pages;
183
184 static void mmu_spte_set(u64 *sptep, u64 spte);
185
186 struct kvm_mmu_role_regs {
187 const unsigned long cr0;
188 const unsigned long cr4;
189 const u64 efer;
190 };
191
192 #define CREATE_TRACE_POINTS
193 #include "mmutrace.h"
194
195 /*
196 * Yes, lots of underscores. They're a hint that you probably shouldn't be
197 * reading from the role_regs. Once the root_role is constructed, it becomes
198 * the single source of truth for the MMU's state.
199 */
200 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
201 static inline bool __maybe_unused \
202 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs) \
203 { \
204 return !!(regs->reg & flag); \
205 }
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
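/*
 * For reference, a sketch of what the generator above produces:
 * BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP) expands to
 *
 *	static inline bool __maybe_unused
 *	____is_cr0_wp(const struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_WP);
 *	}
 */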
216
217 /*
218 * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
220 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
221 * and the vCPU may be incorrect/irrelevant.
222 */
223 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
225 { \
226 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
227 }
228 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
229 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
230 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
231 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
232 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
233 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
234 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
235 BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
236
237 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
238 {
239 return mmu->cpu_role.base.level > 0;
240 }
241
242 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
243 {
244 return !mmu->cpu_role.base.has_4_byte_gpte;
245 }
246
247 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
248 {
249 struct kvm_mmu_role_regs regs = {
250 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
251 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
252 .efer = vcpu->arch.efer,
253 };
254
255 return regs;
256 }
257
258 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
259 {
260 return kvm_read_cr3(vcpu);
261 }
262
263 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
264 struct kvm_mmu *mmu)
265 {
266 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
267 return kvm_read_cr3(vcpu);
268
269 return mmu->get_guest_pgd(vcpu);
270 }
271
272 static inline bool kvm_available_flush_remote_tlbs_range(void)
273 {
274 #if IS_ENABLED(CONFIG_HYPERV)
275 return kvm_x86_ops.flush_remote_tlbs_range;
276 #else
277 return false;
278 #endif
279 }
280
281 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
282
283 /* Flush the range of guest memory mapped by the given SPTE. */
284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
285 {
286 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
287 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
288
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
290 }
291
292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
293 unsigned int access)
294 {
295 u64 spte = make_mmio_spte(vcpu, gfn, access);
296
297 trace_mark_mmio_spte(sptep, gfn, spte);
298 mmu_spte_set(sptep, spte);
299 }
300
301 static gfn_t get_mmio_spte_gfn(u64 spte)
302 {
303 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
304
305 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
306 & shadow_nonpresent_or_rsvd_mask;
307
308 return gpa >> PAGE_SHIFT;
309 }
310
311 static unsigned get_mmio_spte_access(u64 spte)
312 {
313 return spte & shadow_mmio_access_mask;
314 }
315
316 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
317 {
318 u64 kvm_gen, spte_gen, gen;
319
320 gen = kvm_vcpu_memslots(vcpu)->generation;
321 if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
322 return false;
323
324 kvm_gen = gen & MMIO_SPTE_GEN_MASK;
325 spte_gen = get_mmio_spte_generation(spte);
326
327 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 return likely(kvm_gen == spte_gen);
329 }
330
331 static int is_cpuid_PSE36(void)
332 {
333 return 1;
334 }
335
336 #ifdef CONFIG_X86_64
337 static void __set_spte(u64 *sptep, u64 spte)
338 {
339 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
340 WRITE_ONCE(*sptep, spte);
341 }
342
343 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
344 {
345 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
346 WRITE_ONCE(*sptep, spte);
347 }
348
349 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
350 {
351 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
352 return xchg(sptep, spte);
353 }
354
355 static u64 __get_spte_lockless(u64 *sptep)
356 {
357 return READ_ONCE(*sptep);
358 }
359 #else
360 union split_spte {
361 struct {
362 u32 spte_low;
363 u32 spte_high;
364 };
365 u64 spte;
366 };
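/*
 * Example (illustrative value): on 32-bit x86 the union above aliases one
 * 64-bit SPTE with its two 32-bit halves, so spte == 0x8000000012345677 is
 * seen as spte_low == 0x12345677 and spte_high == 0x80000000.
 */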
367
368 static void count_spte_clear(u64 *sptep, u64 spte)
369 {
370 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
371
372 if (is_shadow_present_pte(spte))
373 return;
374
375 /* Ensure the spte is completely set before we increase the count */
376 smp_wmb();
377 sp->clear_spte_count++;
378 }
379
380 static void __set_spte(u64 *sptep, u64 spte)
381 {
382 union split_spte *ssptep, sspte;
383
384 ssptep = (union split_spte *)sptep;
385 sspte = (union split_spte)spte;
386
387 ssptep->spte_high = sspte.spte_high;
388
389 /*
390 * If we map the spte from nonpresent to present, we should store
391 * the high bits first, then set the present bit, so the CPU cannot
392 * fetch this spte while we are setting it.
393 */
394 smp_wmb();
395
396 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
397 }
398
399 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
400 {
401 union split_spte *ssptep, sspte;
402
403 ssptep = (union split_spte *)sptep;
404 sspte = (union split_spte)spte;
405
406 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
407
408 /*
409 * If we map the spte from present to nonpresent, we should clear the
410 * present bit first so that a vCPU cannot fetch the stale high bits.
411 */
412 smp_wmb();
413
414 ssptep->spte_high = sspte.spte_high;
415 count_spte_clear(sptep, spte);
416 }
417
418 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
419 {
420 union split_spte *ssptep, sspte, orig;
421
422 ssptep = (union split_spte *)sptep;
423 sspte = (union split_spte)spte;
424
425 /* xchg acts as a barrier before the setting of the high bits */
426 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
427 orig.spte_high = ssptep->spte_high;
428 ssptep->spte_high = sspte.spte_high;
429 count_spte_clear(sptep, spte);
430
431 return orig.spte;
432 }
433
434 /*
435 * The idea of this lightweight way of getting the spte on x86_32 comes
436 * from gup_get_pte (mm/gup.c).
437 *
438 * An spte TLB flush may be pending, because flushes are coalesced and
439 * we are running outside of the MMU lock. Therefore we need to protect
440 * against in-progress updates of the spte.
441 *
442 * Reading the spte while an update is in progress may get the old value
443 * for the high part of the spte. The race is fine for a present->non-present
444 * change (because the high part of the spte is ignored for non-present spte),
445 * but for a present->present change we must reread the spte.
446 *
447 * All such changes are done in two steps (present->non-present and
448 * non-present->present), hence it is enough to count the number of
449 * present->non-present updates: if it changed while reading the spte,
450 * we might have hit the race. This is done using clear_spte_count.
451 */
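/*
 * Worked example of the race (illustrative): a reader copies spte_low, then
 * a writer completes a present->nonpresent->present transition before the
 * reader copies spte_high.  The reader would pair the old low half with the
 * new high half, but either spte_low or clear_spte_count will have changed,
 * so the retry loop below detects the mismatch and re-reads the whole spte.
 */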
452 static u64 __get_spte_lockless(u64 *sptep)
453 {
454 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
455 union split_spte spte, *orig = (union split_spte *)sptep;
456 int count;
457
458 retry:
459 count = sp->clear_spte_count;
460 smp_rmb();
461
462 spte.spte_low = orig->spte_low;
463 smp_rmb();
464
465 spte.spte_high = orig->spte_high;
466 smp_rmb();
467
468 if (unlikely(spte.spte_low != orig->spte_low ||
469 count != sp->clear_spte_count))
470 goto retry;
471
472 return spte.spte;
473 }
474 #endif
475
476 /* Rules for using mmu_spte_set:
477 * Set the sptep from nonpresent to present.
478 * Note: the sptep being assigned *must* be either not present
479 * or in a state where the hardware will not attempt to update
480 * the spte.
481 */
482 static void mmu_spte_set(u64 *sptep, u64 new_spte)
483 {
484 WARN_ON_ONCE(is_shadow_present_pte(*sptep));
485 __set_spte(sptep, new_spte);
486 }
487
488 /*
489 * Update the SPTE (excluding the PFN), but do not track changes in its
490 * accessed/dirty status.
491 */
492 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
493 {
494 u64 old_spte = *sptep;
495
496 WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
497 check_spte_writable_invariants(new_spte);
498
499 if (!is_shadow_present_pte(old_spte)) {
500 mmu_spte_set(sptep, new_spte);
501 return old_spte;
502 }
503
504 if (!spte_has_volatile_bits(old_spte))
505 __update_clear_spte_fast(sptep, new_spte);
506 else
507 old_spte = __update_clear_spte_slow(sptep, new_spte);
508
509 WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
510
511 return old_spte;
512 }
513
514 /* Rules for using mmu_spte_update:
515 * Update the state bits; the mapped pfn is not changed.
516 *
517 * Whenever an MMU-writable SPTE is overwritten with a read-only SPTE, remote
518 * TLBs must be flushed. Otherwise rmap_write_protect will find a read-only
519 * spte, even though the writable spte might be cached on a CPU's TLB.
520 *
521 * Returns true if the TLB needs to be flushed
522 */
523 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
524 {
525 bool flush = false;
526 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
527
528 if (!is_shadow_present_pte(old_spte))
529 return false;
530
531 /*
532 * Updating the spte outside of mmu_lock is safe, since
533 * we always update it atomically; see the comments in
534 * spte_has_volatile_bits().
535 */
536 if (is_mmu_writable_spte(old_spte) &&
537 !is_writable_pte(new_spte))
538 flush = true;
539
540 /*
541 * Flush TLB when accessed/dirty states are changed in the page tables,
542 * to guarantee consistency between TLB and page tables.
543 */
544
545 if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
546 flush = true;
547 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
548 }
549
550 if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
551 flush = true;
552 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
553 }
554
555 return flush;
556 }
557
558 /*
559 * Rules for using mmu_spte_clear_track_bits:
560 * It sets the sptep from present to nonpresent and tracks the
561 * state bits; it is used to clear the last level sptep.
562 * Returns the old PTE.
563 */
564 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
565 {
566 kvm_pfn_t pfn;
567 u64 old_spte = *sptep;
568 int level = sptep_to_sp(sptep)->role.level;
569 struct page *page;
570
571 if (!is_shadow_present_pte(old_spte) ||
572 !spte_has_volatile_bits(old_spte))
573 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
574 else
575 old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
576
577 if (!is_shadow_present_pte(old_spte))
578 return old_spte;
579
580 kvm_update_page_stats(kvm, level, -1);
581
582 pfn = spte_to_pfn(old_spte);
583
584 /*
585 * KVM doesn't hold a reference to any pages mapped into the guest, and
586 * instead uses the mmu_notifier to ensure that KVM unmaps any pages
587 * before they are reclaimed. Sanity check that, if the pfn is backed
588 * by a refcounted page, the refcount is elevated.
589 */
590 page = kvm_pfn_to_refcounted_page(pfn);
591 WARN_ON_ONCE(page && !page_count(page));
592
593 if (is_accessed_spte(old_spte))
594 kvm_set_pfn_accessed(pfn);
595
596 if (is_dirty_spte(old_spte))
597 kvm_set_pfn_dirty(pfn);
598
599 return old_spte;
600 }
601
602 /*
603 * Rules for using mmu_spte_clear_no_track:
604 * Directly clear the spte without tracking its state bits;
605 * it is used when clearing upper level sptes.
606 */
607 static void mmu_spte_clear_no_track(u64 *sptep)
608 {
609 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
610 }
611
612 static u64 mmu_spte_get_lockless(u64 *sptep)
613 {
614 return __get_spte_lockless(sptep);
615 }
616
617 /* Returns the Accessed status of the PTE and resets it at the same time. */
618 static bool mmu_spte_age(u64 *sptep)
619 {
620 u64 spte = mmu_spte_get_lockless(sptep);
621
622 if (!is_accessed_spte(spte))
623 return false;
624
625 if (spte_ad_enabled(spte)) {
626 clear_bit((ffs(shadow_accessed_mask) - 1),
627 (unsigned long *)sptep);
628 } else {
629 /*
630 * Capture the dirty status of the page, so that it doesn't get
631 * lost when the SPTE is marked for access tracking.
632 */
633 if (is_writable_pte(spte))
634 kvm_set_pfn_dirty(spte_to_pfn(spte));
635
636 spte = mark_spte_for_access_track(spte);
637 mmu_spte_update_no_track(sptep, spte);
638 }
639
640 return true;
641 }
642
643 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
644 {
645 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
646 }
647
648 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
649 {
650 if (is_tdp_mmu_active(vcpu)) {
651 kvm_tdp_mmu_walk_lockless_begin();
652 } else {
653 /*
654 * Prevent page table teardown by making any free-er wait during
655 * kvm_flush_remote_tlbs() IPI to all active vcpus.
656 */
657 local_irq_disable();
658
659 /*
660 * Make sure a following spte read is not reordered ahead of the write
661 * to vcpu->mode.
662 */
663 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
664 }
665 }
666
667 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
668 {
669 if (is_tdp_mmu_active(vcpu)) {
670 kvm_tdp_mmu_walk_lockless_end();
671 } else {
672 /*
673 * Make sure the write to vcpu->mode is not reordered in front of
674 * reads of sptes. If it is, kvm_mmu_commit_zap_page() can see us
675 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
676 */
677 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
678 local_irq_enable();
679 }
680 }
681
682 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
683 {
684 int r;
685
686 /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
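/*
 * Back-of-the-envelope sizing (a sketch; the constants are defined in KVM
 * headers and may change): with PT64_ROOT_MAX_LEVEL == 5 and
 * PTE_PREFETCH_NUM == 8, the topup below asks for 1 + 5 + 8 = 14
 * pte_list_desc objects.
 */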
687 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
688 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
689 if (r)
690 return r;
691 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
692 PT64_ROOT_MAX_LEVEL);
693 if (r)
694 return r;
695 if (maybe_indirect) {
696 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
697 PT64_ROOT_MAX_LEVEL);
698 if (r)
699 return r;
700 }
701 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
702 PT64_ROOT_MAX_LEVEL);
703 }
704
705 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
706 {
707 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
711 }
712
713 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
714 {
715 kmem_cache_free(pte_list_desc_cache, pte_list_desc);
716 }
717
718 static bool sp_has_gptes(struct kvm_mmu_page *sp);
719
720 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
721 {
722 if (sp->role.passthrough)
723 return sp->gfn;
724
725 if (!sp->role.direct)
726 return sp->shadowed_translation[index] >> PAGE_SHIFT;
727
728 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
729 }
730
731 /*
732 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
733 * that the SPTE itself may have more constrained access permissions than
734 * what the guest enforces. For example, a guest may create an executable
735 * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
736 */
737 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
738 {
739 if (sp_has_gptes(sp))
740 return sp->shadowed_translation[index] & ACC_ALL;
741
742 /*
743 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
744 * KVM is not shadowing any guest page tables, so the "guest access
745 * permissions" are just ACC_ALL.
746 *
747 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
748 * is shadowing a guest huge page with small pages, the guest access
749 * permissions being shadowed are the access permissions of the huge
750 * page.
751 *
752 * In both cases, sp->role.access contains the correct access bits.
753 */
754 return sp->role.access;
755 }
756
757 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
758 gfn_t gfn, unsigned int access)
759 {
760 if (sp_has_gptes(sp)) {
761 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
762 return;
763 }
764
765 WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
766 "access mismatch under %s page %llx (expected %u, got %u)\n",
767 sp->role.passthrough ? "passthrough" : "direct",
768 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
769
770 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
771 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
772 sp->role.passthrough ? "passthrough" : "direct",
773 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
774 }
775
776 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
777 unsigned int access)
778 {
779 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
780
781 kvm_mmu_page_set_translation(sp, index, gfn, access);
782 }
783
784 /*
785 * Return the pointer to the large page information for a given gfn,
786 * handling slots that are not large page aligned.
787 */
788 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
789 const struct kvm_memory_slot *slot, int level)
790 {
791 unsigned long idx;
792
793 idx = gfn_to_index(gfn, slot->base_gfn, level);
794 return &slot->arch.lpage_info[level - 2][idx];
795 }
796
797 /*
798 * The most significant bit in disallow_lpage tracks whether or not memory
799 * attributes are mixed, i.e. not identical for all gfns at the current level.
800 * The lower order bits are used to refcount other cases where a hugepage is
801 * disallowed, e.g. if KVM is shadowing a page table at the gfn.
802 */
803 #define KVM_LPAGE_MIXED_FLAG BIT(31)
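/*
 * Example (illustrative): disallow_lpage == (KVM_LPAGE_MIXED_FLAG | 2) means
 * memory attributes are mixed at this level *and* two independent reasons,
 * e.g. shadowed page tables, currently forbid a hugepage at this gfn;
 * disallow_lpage == 0 means a hugepage is allowed.
 */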
804
805 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
806 gfn_t gfn, int count)
807 {
808 struct kvm_lpage_info *linfo;
809 int old, i;
810
811 for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
812 linfo = lpage_info_slot(gfn, slot, i);
813
814 old = linfo->disallow_lpage;
815 linfo->disallow_lpage += count;
816 WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
817 }
818 }
819
820 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
821 {
822 update_gfn_disallow_lpage_count(slot, gfn, 1);
823 }
824
825 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
826 {
827 update_gfn_disallow_lpage_count(slot, gfn, -1);
828 }
829
830 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
831 {
832 struct kvm_memslots *slots;
833 struct kvm_memory_slot *slot;
834 gfn_t gfn;
835
836 kvm->arch.indirect_shadow_pages++;
837 /*
838 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
839 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
840 * emulated writes are visible before re-reading guest PTEs, or that
841 * an emulated write will see the elevated count and acquire mmu_lock
842 * to update SPTEs. Pairs with the smp_mb() in kvm_mmu_track_write().
843 */
844 smp_mb();
845
846 gfn = sp->gfn;
847 slots = kvm_memslots_for_spte_role(kvm, sp->role);
848 slot = __gfn_to_memslot(slots, gfn);
849
850 /* Non-leaf shadow pages are kept read-only. */
851 if (sp->role.level > PG_LEVEL_4K)
852 return __kvm_write_track_add_gfn(kvm, slot, gfn);
853
854 kvm_mmu_gfn_disallow_lpage(slot, gfn);
855
856 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
857 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
858 }
859
860 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
861 {
862 /*
863 * If it's possible to replace the shadow page with an NX huge page,
864 * i.e. if the shadow page is the only thing currently preventing KVM
865 * from using a huge page, add the shadow page to the list of "to be
866 * zapped for NX recovery" pages. Note, the shadow page can already be
867 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
868 * links a shadow page at multiple points.
869 */
870 if (!list_empty(&sp->possible_nx_huge_page_link))
871 return;
872
873 ++kvm->stat.nx_lpage_splits;
874 list_add_tail(&sp->possible_nx_huge_page_link,
875 &kvm->arch.possible_nx_huge_pages);
876 }
877
878 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
879 bool nx_huge_page_possible)
880 {
881 sp->nx_huge_page_disallowed = true;
882
883 if (nx_huge_page_possible)
884 track_possible_nx_huge_page(kvm, sp);
885 }
886
887 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
888 {
889 struct kvm_memslots *slots;
890 struct kvm_memory_slot *slot;
891 gfn_t gfn;
892
893 kvm->arch.indirect_shadow_pages--;
894 gfn = sp->gfn;
895 slots = kvm_memslots_for_spte_role(kvm, sp->role);
896 slot = __gfn_to_memslot(slots, gfn);
897 if (sp->role.level > PG_LEVEL_4K)
898 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
899
900 kvm_mmu_gfn_allow_lpage(slot, gfn);
901 }
902
903 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
904 {
905 if (list_empty(&sp->possible_nx_huge_page_link))
906 return;
907
908 --kvm->stat.nx_lpage_splits;
909 list_del_init(&sp->possible_nx_huge_page_link);
910 }
911
912 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
913 {
914 sp->nx_huge_page_disallowed = false;
915
916 untrack_possible_nx_huge_page(kvm, sp);
917 }
918
919 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
920 gfn_t gfn,
921 bool no_dirty_log)
922 {
923 struct kvm_memory_slot *slot;
924
925 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
926 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
927 return NULL;
928 if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
929 return NULL;
930
931 return slot;
932 }
933
934 /*
935 * About rmap_head encoding:
936 *
937 * If the bit zero of rmap_head->val is clear, then it points to the only spte
938 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
939 * pte_list_desc containing more mappings.
940 */
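/*
 * Example (illustrative addresses): with a single spte the head stores the
 * pointer directly, e.g. rmap_head->val == 0xffff888012345678 (bit zero
 * clear).  Once a second spte is added, pte_list_add() allocates a
 * descriptor and stores rmap_head->val == (unsigned long)desc | 1, with
 * both sptes held in desc->sptes[].
 */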
941
942 /*
943 * Returns the number of pointers in the rmap chain, not counting the new one.
944 */
945 static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
946 struct kvm_rmap_head *rmap_head)
947 {
948 struct pte_list_desc *desc;
949 int count = 0;
950
951 if (!rmap_head->val) {
952 rmap_head->val = (unsigned long)spte;
953 } else if (!(rmap_head->val & 1)) {
954 desc = kvm_mmu_memory_cache_alloc(cache);
955 desc->sptes[0] = (u64 *)rmap_head->val;
956 desc->sptes[1] = spte;
957 desc->spte_count = 2;
958 desc->tail_count = 0;
959 rmap_head->val = (unsigned long)desc | 1;
960 ++count;
961 } else {
962 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
963 count = desc->tail_count + desc->spte_count;
964
965 /*
966 * If the previous head is full, allocate a new head descriptor
967 * as tail descriptors are always kept full.
968 */
969 if (desc->spte_count == PTE_LIST_EXT) {
970 desc = kvm_mmu_memory_cache_alloc(cache);
971 desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul);
972 desc->spte_count = 0;
973 desc->tail_count = count;
974 rmap_head->val = (unsigned long)desc | 1;
975 }
976 desc->sptes[desc->spte_count++] = spte;
977 }
978 return count;
979 }
980
981 static void pte_list_desc_remove_entry(struct kvm *kvm,
982 struct kvm_rmap_head *rmap_head,
983 struct pte_list_desc *desc, int i)
984 {
985 struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
986 int j = head_desc->spte_count - 1;
987
988 /*
989 * The head descriptor should never be empty. A new head is added only
990 * when adding an entry and the previous head is full, and heads are
991 * removed (this flow) when they become empty.
992 */
993 KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
994
995 /*
996 * Replace the to-be-freed SPTE with the last valid entry from the head
997 * descriptor to ensure that tail descriptors are full at all times.
998 * Note, this also means that tail_count is stable for each descriptor.
999 */
1000 desc->sptes[i] = head_desc->sptes[j];
1001 head_desc->sptes[j] = NULL;
1002 head_desc->spte_count--;
1003 if (head_desc->spte_count)
1004 return;
1005
1006 /*
1007 * The head descriptor is empty. If there are no tail descriptors,
1008 * nullify the rmap head to mark the list as empty, else point the rmap
1009 * head at the next descriptor, i.e. the new head.
1010 */
1011 if (!head_desc->more)
1012 rmap_head->val = 0;
1013 else
1014 rmap_head->val = (unsigned long)head_desc->more | 1;
1015 mmu_free_pte_list_desc(head_desc);
1016 }
1017
1018 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1019 struct kvm_rmap_head *rmap_head)
1020 {
1021 struct pte_list_desc *desc;
1022 int i;
1023
1024 if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
1025 return;
1026
1027 if (!(rmap_head->val & 1)) {
1028 if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
1029 return;
1030
1031 rmap_head->val = 0;
1032 } else {
1033 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1034 while (desc) {
1035 for (i = 0; i < desc->spte_count; ++i) {
1036 if (desc->sptes[i] == spte) {
1037 pte_list_desc_remove_entry(kvm, rmap_head,
1038 desc, i);
1039 return;
1040 }
1041 }
1042 desc = desc->more;
1043 }
1044
1045 KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1046 }
1047 }
1048
1049 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
1050 struct kvm_rmap_head *rmap_head, u64 *sptep)
1051 {
1052 mmu_spte_clear_track_bits(kvm, sptep);
1053 pte_list_remove(kvm, sptep, rmap_head);
1054 }
1055
1056 /* Return true if at least one SPTE was zapped, false otherwise */
1057 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
1058 struct kvm_rmap_head *rmap_head)
1059 {
1060 struct pte_list_desc *desc, *next;
1061 int i;
1062
1063 if (!rmap_head->val)
1064 return false;
1065
1066 if (!(rmap_head->val & 1)) {
1067 mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1068 goto out;
1069 }
1070
1071 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1072
1073 for (; desc; desc = next) {
1074 for (i = 0; i < desc->spte_count; i++)
1075 mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1076 next = desc->more;
1077 mmu_free_pte_list_desc(desc);
1078 }
1079 out:
1080 /* rmap_head is meaningless now, remember to reset it */
1081 rmap_head->val = 0;
1082 return true;
1083 }
1084
1085 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1086 {
1087 struct pte_list_desc *desc;
1088
1089 if (!rmap_head->val)
1090 return 0;
1091 else if (!(rmap_head->val & 1))
1092 return 1;
1093
1094 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1095 return desc->tail_count + desc->spte_count;
1096 }
1097
1098 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1099 const struct kvm_memory_slot *slot)
1100 {
1101 unsigned long idx;
1102
1103 idx = gfn_to_index(gfn, slot->base_gfn, level);
1104 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1105 }
1106
1107 static void rmap_remove(struct kvm *kvm, u64 *spte)
1108 {
1109 struct kvm_memslots *slots;
1110 struct kvm_memory_slot *slot;
1111 struct kvm_mmu_page *sp;
1112 gfn_t gfn;
1113 struct kvm_rmap_head *rmap_head;
1114
1115 sp = sptep_to_sp(spte);
1116 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1117
1118 /*
1119 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1120 * so we have to determine which memslots to use based on context
1121 * information in sp->role.
1122 */
1123 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1124
1125 slot = __gfn_to_memslot(slots, gfn);
1126 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1127
1128 pte_list_remove(kvm, spte, rmap_head);
1129 }
1130
1131 /*
1132 * Used by the following functions to iterate through the sptes linked by a
1133 * rmap. All fields are private and not assumed to be used outside.
1134 */
1135 struct rmap_iterator {
1136 /* private fields */
1137 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1138 int pos; /* index of the sptep */
1139 };
1140
1141 /*
1142 * Iteration must be started by this function. This should also be used after
1143 * removing/dropping sptes from the rmap link because in such cases the
1144 * information in the iterator may not be valid.
1145 *
1146 * Returns sptep if found, NULL otherwise.
1147 */
1148 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1149 struct rmap_iterator *iter)
1150 {
1151 u64 *sptep;
1152
1153 if (!rmap_head->val)
1154 return NULL;
1155
1156 if (!(rmap_head->val & 1)) {
1157 iter->desc = NULL;
1158 sptep = (u64 *)rmap_head->val;
1159 goto out;
1160 }
1161
1162 iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1163 iter->pos = 0;
1164 sptep = iter->desc->sptes[iter->pos];
1165 out:
1166 BUG_ON(!is_shadow_present_pte(*sptep));
1167 return sptep;
1168 }
1169
1170 /*
1171 * Must be used with a valid iterator: e.g. after rmap_get_first().
1172 *
1173 * Returns sptep if found, NULL otherwise.
1174 */
1175 static u64 *rmap_get_next(struct rmap_iterator *iter)
1176 {
1177 u64 *sptep;
1178
1179 if (iter->desc) {
1180 if (iter->pos < PTE_LIST_EXT - 1) {
1181 ++iter->pos;
1182 sptep = iter->desc->sptes[iter->pos];
1183 if (sptep)
1184 goto out;
1185 }
1186
1187 iter->desc = iter->desc->more;
1188
1189 if (iter->desc) {
1190 iter->pos = 0;
1191 /* desc->sptes[0] cannot be NULL */
1192 sptep = iter->desc->sptes[iter->pos];
1193 goto out;
1194 }
1195 }
1196
1197 return NULL;
1198 out:
1199 BUG_ON(!is_shadow_present_pte(*sptep));
1200 return sptep;
1201 }
1202
1203 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
1204 for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
1205 _spte_; _spte_ = rmap_get_next(_iter_))
1206
1207 static void drop_spte(struct kvm *kvm, u64 *sptep)
1208 {
1209 u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1210
1211 if (is_shadow_present_pte(old_spte))
1212 rmap_remove(kvm, sptep);
1213 }
1214
1215 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1216 {
1217 struct kvm_mmu_page *sp;
1218
1219 sp = sptep_to_sp(sptep);
1220 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1221
1222 drop_spte(kvm, sptep);
1223
1224 if (flush)
1225 kvm_flush_remote_tlbs_sptep(kvm, sptep);
1226 }
1227
1228 /*
1229 * Write-protect on the specified @sptep, @pt_protect indicates whether
1230 * spte write-protection is caused by protecting shadow page table.
1231 *
1232 * Note: write protection requirements differ between dirty logging and spte
1233 * protection:
1234 * - for dirty logging, the spte can be set to writable at anytime if
1235 * its dirty bitmap is properly set.
1236 * - for spte protection, the spte can be writable only after unsync-ing
1237 * shadow page.
1238 *
1239 * Return true if the TLB needs to be flushed.
1240 */
1241 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1242 {
1243 u64 spte = *sptep;
1244
1245 if (!is_writable_pte(spte) &&
1246 !(pt_protect && is_mmu_writable_spte(spte)))
1247 return false;
1248
1249 if (pt_protect)
1250 spte &= ~shadow_mmu_writable_mask;
1251 spte = spte & ~PT_WRITABLE_MASK;
1252
1253 return mmu_spte_update(sptep, spte);
1254 }
1255
1256 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1257 bool pt_protect)
1258 {
1259 u64 *sptep;
1260 struct rmap_iterator iter;
1261 bool flush = false;
1262
1263 for_each_rmap_spte(rmap_head, &iter, sptep)
1264 flush |= spte_write_protect(sptep, pt_protect);
1265
1266 return flush;
1267 }
1268
1269 static bool spte_clear_dirty(u64 *sptep)
1270 {
1271 u64 spte = *sptep;
1272
1273 KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1274 spte &= ~shadow_dirty_mask;
1275 return mmu_spte_update(sptep, spte);
1276 }
1277
1278 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1279 {
1280 bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1281 (unsigned long *)sptep);
1282 if (was_writable && !spte_ad_enabled(*sptep))
1283 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1284
1285 return was_writable;
1286 }
1287
1288 /*
1289 * Gets the GFN ready for another round of dirty logging by clearing the
1290 * - D bit on ad-enabled SPTEs, and
1291 * - W bit on ad-disabled SPTEs.
1292 * Returns true iff any D or W bits were cleared.
1293 */
1294 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1295 const struct kvm_memory_slot *slot)
1296 {
1297 u64 *sptep;
1298 struct rmap_iterator iter;
1299 bool flush = false;
1300
1301 for_each_rmap_spte(rmap_head, &iter, sptep)
1302 if (spte_ad_need_write_protect(*sptep))
1303 flush |= spte_wrprot_for_clear_dirty(sptep);
1304 else
1305 flush |= spte_clear_dirty(sptep);
1306
1307 return flush;
1308 }
1309
1310 /**
1311 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1312 * @kvm: kvm instance
1313 * @slot: slot to protect
1314 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1315 * @mask: indicates which pages we should protect
1316 *
1317 * Used when we do not need to care about huge page mappings.
1318 */
1319 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1320 struct kvm_memory_slot *slot,
1321 gfn_t gfn_offset, unsigned long mask)
1322 {
1323 struct kvm_rmap_head *rmap_head;
1324
1325 if (tdp_mmu_enabled)
1326 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1327 slot->base_gfn + gfn_offset, mask, true);
1328
1329 if (!kvm_memslots_have_rmaps(kvm))
1330 return;
1331
1332 while (mask) {
1333 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1334 PG_LEVEL_4K, slot);
1335 rmap_write_protect(rmap_head, false);
1336
1337 /* clear the first set bit */
1338 mask &= mask - 1;
1339 }
1340 }
1341
1342 /**
1343 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1344 * protect the page if the D-bit isn't supported.
1345 * @kvm: kvm instance
1346 * @slot: slot to clear D-bit
1347 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1348 * @mask: indicates which pages we should clear D-bit
1349 *
1350 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1351 */
1352 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1353 struct kvm_memory_slot *slot,
1354 gfn_t gfn_offset, unsigned long mask)
1355 {
1356 struct kvm_rmap_head *rmap_head;
1357
1358 if (tdp_mmu_enabled)
1359 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1360 slot->base_gfn + gfn_offset, mask, false);
1361
1362 if (!kvm_memslots_have_rmaps(kvm))
1363 return;
1364
1365 while (mask) {
1366 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1367 PG_LEVEL_4K, slot);
1368 __rmap_clear_dirty(kvm, rmap_head, slot);
1369
1370 /* clear the first set bit */
1371 mask &= mask - 1;
1372 }
1373 }
1374
1375 /**
1376 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1377 * PT level pages.
1378 *
1379 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1380 * enable dirty logging for them.
1381 *
1382 * We need to care about huge page mappings: e.g. during dirty logging we may
1383 * have such mappings.
1384 */
1385 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1386 struct kvm_memory_slot *slot,
1387 gfn_t gfn_offset, unsigned long mask)
1388 {
1389 /*
1390 * Huge pages are NOT write protected when we start dirty logging in
1391 * initially-all-set mode; we must write protect them here so that they
1392 * are split to 4K on the first write.
1393 *
1394 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1395 * of memslot has no such restriction, so the range can cross two large
1396 * pages.
1397 */
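/*
 * Worked example (hypothetical numbers): base_gfn == 0x1f0 and
 * gfn_offset == 0 with a fully set mask covers gfns 0x1f0-0x22f, which
 * straddles the 2M boundary at gfn 0x200, so both the start and end gfns
 * below must be write-protected at PG_LEVEL_2M.
 */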
1398 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1399 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1400 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1401
1402 if (READ_ONCE(eager_page_split))
1403 kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1404
1405 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1406
1407 /* Cross two large pages? */
1408 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1409 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1410 kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1411 PG_LEVEL_2M);
1412 }
1413
1414 /* Now handle 4K PTEs. */
1415 if (kvm_x86_ops.cpu_dirty_log_size)
1416 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1417 else
1418 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1419 }
1420
1421 int kvm_cpu_dirty_log_size(void)
1422 {
1423 return kvm_x86_ops.cpu_dirty_log_size;
1424 }
1425
1426 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1427 struct kvm_memory_slot *slot, u64 gfn,
1428 int min_level)
1429 {
1430 struct kvm_rmap_head *rmap_head;
1431 int i;
1432 bool write_protected = false;
1433
1434 if (kvm_memslots_have_rmaps(kvm)) {
1435 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1436 rmap_head = gfn_to_rmap(gfn, i, slot);
1437 write_protected |= rmap_write_protect(rmap_head, true);
1438 }
1439 }
1440
1441 if (tdp_mmu_enabled)
1442 write_protected |=
1443 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1444
1445 return write_protected;
1446 }
1447
1448 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1449 {
1450 struct kvm_memory_slot *slot;
1451
1452 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1453 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1454 }
1455
1456 static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1457 const struct kvm_memory_slot *slot)
1458 {
1459 return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1460 }
1461
1462 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1463 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1464 {
1465 return __kvm_zap_rmap(kvm, rmap_head, slot);
1466 }
1467
1468 struct slot_rmap_walk_iterator {
1469 /* input fields. */
1470 const struct kvm_memory_slot *slot;
1471 gfn_t start_gfn;
1472 gfn_t end_gfn;
1473 int start_level;
1474 int end_level;
1475
1476 /* output fields. */
1477 gfn_t gfn;
1478 struct kvm_rmap_head *rmap;
1479 int level;
1480
1481 /* private field. */
1482 struct kvm_rmap_head *end_rmap;
1483 };
1484
1485 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1486 int level)
1487 {
1488 iterator->level = level;
1489 iterator->gfn = iterator->start_gfn;
1490 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1491 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1492 }
1493
1494 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1495 const struct kvm_memory_slot *slot,
1496 int start_level, int end_level,
1497 gfn_t start_gfn, gfn_t end_gfn)
1498 {
1499 iterator->slot = slot;
1500 iterator->start_level = start_level;
1501 iterator->end_level = end_level;
1502 iterator->start_gfn = start_gfn;
1503 iterator->end_gfn = end_gfn;
1504
1505 rmap_walk_init_level(iterator, iterator->start_level);
1506 }
1507
1508 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1509 {
1510 return !!iterator->rmap;
1511 }
1512
1513 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1514 {
1515 while (++iterator->rmap <= iterator->end_rmap) {
1516 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1517
1518 if (iterator->rmap->val)
1519 return;
1520 }
1521
1522 if (++iterator->level > iterator->end_level) {
1523 iterator->rmap = NULL;
1524 return;
1525 }
1526
1527 rmap_walk_init_level(iterator, iterator->level);
1528 }
1529
1530 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
1531 _start_gfn, _end_gfn, _iter_) \
1532 for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
1533 _end_level_, _start_gfn, _end_gfn); \
1534 slot_rmap_walk_okay(_iter_); \
1535 slot_rmap_walk_next(_iter_))
1536
1537 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1538 struct kvm_memory_slot *slot, gfn_t gfn,
1539 int level);
1540
1541 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1542 struct kvm_gfn_range *range,
1543 rmap_handler_t handler)
1544 {
1545 struct slot_rmap_walk_iterator iterator;
1546 bool ret = false;
1547
1548 for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1549 range->start, range->end - 1, &iterator)
1550 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1551 iterator.level);
1552
1553 return ret;
1554 }
1555
1556 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1557 {
1558 bool flush = false;
1559
1560 if (kvm_memslots_have_rmaps(kvm))
1561 flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
1562
1563 if (tdp_mmu_enabled)
1564 flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1565
1566 if (kvm_x86_ops.set_apic_access_page_addr &&
1567 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1568 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1569
1570 return flush;
1571 }
1572
1573 static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1574 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1575 {
1576 u64 *sptep;
1577 struct rmap_iterator iter;
1578 int young = 0;
1579
1580 for_each_rmap_spte(rmap_head, &iter, sptep)
1581 young |= mmu_spte_age(sptep);
1582
1583 return young;
1584 }
1585
1586 static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1587 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1588 {
1589 u64 *sptep;
1590 struct rmap_iterator iter;
1591
1592 for_each_rmap_spte(rmap_head, &iter, sptep)
1593 if (is_accessed_spte(*sptep))
1594 return true;
1595 return false;
1596 }
1597
1598 #define RMAP_RECYCLE_THRESHOLD 1000
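/*
 * If a single gfn accumulates more than RMAP_RECYCLE_THRESHOLD rmap entries,
 * __rmap_add() below zaps the entire chain and flushes remote TLBs rather
 * than letting the list grow without bound.
 */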
1599
1600 static void __rmap_add(struct kvm *kvm,
1601 struct kvm_mmu_memory_cache *cache,
1602 const struct kvm_memory_slot *slot,
1603 u64 *spte, gfn_t gfn, unsigned int access)
1604 {
1605 struct kvm_mmu_page *sp;
1606 struct kvm_rmap_head *rmap_head;
1607 int rmap_count;
1608
1609 sp = sptep_to_sp(spte);
1610 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1611 kvm_update_page_stats(kvm, sp->role.level, 1);
1612
1613 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1614 rmap_count = pte_list_add(cache, spte, rmap_head);
1615
1616 if (rmap_count > kvm->stat.max_mmu_rmap_size)
1617 kvm->stat.max_mmu_rmap_size = rmap_count;
1618 if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1619 kvm_zap_all_rmap_sptes(kvm, rmap_head);
1620 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1621 }
1622 }
1623
1624 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1625 u64 *spte, gfn_t gfn, unsigned int access)
1626 {
1627 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1628
1629 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1630 }
1631
1632 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1633 {
1634 bool young = false;
1635
1636 if (kvm_memslots_have_rmaps(kvm))
1637 young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
1638
1639 if (tdp_mmu_enabled)
1640 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1641
1642 return young;
1643 }
1644
1645 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1646 {
1647 bool young = false;
1648
1649 if (kvm_memslots_have_rmaps(kvm))
1650 young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
1651
1652 if (tdp_mmu_enabled)
1653 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1654
1655 return young;
1656 }
1657
1658 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1659 {
1660 #ifdef CONFIG_KVM_PROVE_MMU
1661 int i;
1662
1663 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1664 if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1665 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1666 sp->spt[i], &sp->spt[i],
1667 kvm_mmu_page_get_gfn(sp, i));
1668 }
1669 #endif
1670 }
1671
1672 /*
1673 * This value is the sum of all of the kvm instances'
1674 * kvm->arch.n_used_mmu_pages values. We need a global,
1675 * aggregate version in order to make the slab shrinker
1676 * faster.
1677 */
1678 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1679 {
1680 kvm->arch.n_used_mmu_pages += nr;
1681 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1682 }
1683
1684 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1685 {
1686 kvm_mod_used_mmu_pages(kvm, +1);
1687 kvm_account_pgtable_pages((void *)sp->spt, +1);
1688 }
1689
1690 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1691 {
1692 kvm_mod_used_mmu_pages(kvm, -1);
1693 kvm_account_pgtable_pages((void *)sp->spt, -1);
1694 }
1695
1696 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1697 {
1698 kvm_mmu_check_sptes_at_free(sp);
1699
1700 hlist_del(&sp->hash_link);
1701 list_del(&sp->link);
1702 free_page((unsigned long)sp->spt);
1703 if (!sp->role.direct)
1704 free_page((unsigned long)sp->shadowed_translation);
1705 kmem_cache_free(mmu_page_header_cache, sp);
1706 }
1707
1708 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1709 {
1710 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1711 }
1712
1713 static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
1714 struct kvm_mmu_page *sp, u64 *parent_pte)
1715 {
1716 if (!parent_pte)
1717 return;
1718
1719 pte_list_add(cache, parent_pte, &sp->parent_ptes);
1720 }
1721
1722 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1723 u64 *parent_pte)
1724 {
1725 pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1726 }
1727
1728 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1729 u64 *parent_pte)
1730 {
1731 mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1732 mmu_spte_clear_no_track(parent_pte);
1733 }
1734
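/*
 * Marking a shadow page unsync propagates up through its parent SPTEs: the
 * SPTE's index is set in the parent page's unsync_child_bitmap and
 * unsync_children is bumped, so that later walks know which subtrees may
 * contain unsync pages that need to be synced.
 */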
1735 static void mark_unsync(u64 *spte);
1736 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1737 {
1738 u64 *sptep;
1739 struct rmap_iterator iter;
1740
1741 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1742 mark_unsync(sptep);
1743 }
1744 }
1745
1746 static void mark_unsync(u64 *spte)
1747 {
1748 struct kvm_mmu_page *sp;
1749
1750 sp = sptep_to_sp(spte);
1751 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1752 return;
1753 if (sp->unsync_children++)
1754 return;
1755 kvm_mmu_mark_parents_unsync(sp);
1756 }
1757
1758 #define KVM_PAGE_ARRAY_NR 16
1759
1760 struct kvm_mmu_pages {
1761 struct mmu_page_and_offset {
1762 struct kvm_mmu_page *sp;
1763 unsigned int idx;
1764 } page[KVM_PAGE_ARRAY_NR];
1765 unsigned int nr;
1766 };
1767
1768 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1769 int idx)
1770 {
1771 int i;
1772
1773 if (sp->unsync)
1774 for (i = 0; i < pvec->nr; i++)
1775 if (pvec->page[i].sp == sp)
1776 return 0;
1777
1778 pvec->page[pvec->nr].sp = sp;
1779 pvec->page[pvec->nr].idx = idx;
1780 pvec->nr++;
1781 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1782 }
1783
1784 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1785 {
1786 --sp->unsync_children;
1787 WARN_ON_ONCE((int)sp->unsync_children < 0);
1788 __clear_bit(idx, sp->unsync_child_bitmap);
1789 }
1790
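/*
 * Recursively walk @sp's unsync children, adding every page that is unsync
 * or has unsync descendants to @pvec.  Returns the number of unsync leaf
 * pages found, or -ENOSPC if @pvec is full and the caller needs to retry.
 */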
1791 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1792 struct kvm_mmu_pages *pvec)
1793 {
1794 int i, ret, nr_unsync_leaf = 0;
1795
1796 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1797 struct kvm_mmu_page *child;
1798 u64 ent = sp->spt[i];
1799
1800 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1801 clear_unsync_child_bit(sp, i);
1802 continue;
1803 }
1804
1805 child = spte_to_child_sp(ent);
1806
1807 if (child->unsync_children) {
1808 if (mmu_pages_add(pvec, child, i))
1809 return -ENOSPC;
1810
1811 ret = __mmu_unsync_walk(child, pvec);
1812 if (!ret) {
1813 clear_unsync_child_bit(sp, i);
1814 continue;
1815 } else if (ret > 0) {
1816 nr_unsync_leaf += ret;
1817 } else
1818 return ret;
1819 } else if (child->unsync) {
1820 nr_unsync_leaf++;
1821 if (mmu_pages_add(pvec, child, i))
1822 return -ENOSPC;
1823 } else
1824 clear_unsync_child_bit(sp, i);
1825 }
1826
1827 return nr_unsync_leaf;
1828 }
1829
1830 #define INVALID_INDEX (-1)
1831
1832 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1833 struct kvm_mmu_pages *pvec)
1834 {
1835 pvec->nr = 0;
1836 if (!sp->unsync_children)
1837 return 0;
1838
1839 mmu_pages_add(pvec, sp, INVALID_INDEX);
1840 return __mmu_unsync_walk(sp, pvec);
1841 }
1842
1843 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1844 {
1845 WARN_ON_ONCE(!sp->unsync);
1846 trace_kvm_mmu_sync_page(sp);
1847 sp->unsync = 0;
1848 --kvm->stat.mmu_unsync;
1849 }
1850
1851 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1852 struct list_head *invalid_list);
1853 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1854 struct list_head *invalid_list);
1855
1856 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1857 {
1858 if (sp->role.direct)
1859 return false;
1860
1861 if (sp->role.passthrough)
1862 return false;
1863
1864 return true;
1865 }
1866
1867 #define for_each_valid_sp(_kvm, _sp, _list) \
1868 hlist_for_each_entry(_sp, _list, hash_link) \
1869 if (is_obsolete_sp((_kvm), (_sp))) { \
1870 } else
1871
1872 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
1873 for_each_valid_sp(_kvm, _sp, \
1874 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1875 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1876
1877 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1878 {
1879 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1880
1881 /*
1882 * Ignore various flags when verifying that it's safe to sync a shadow
1883 * page using the current MMU context.
1884 *
1885 * - level: not part of the overall MMU role and will never match as the MMU's
1886 * level tracks the root level
1887 * - access: updated based on the new guest PTE
1888 * - quadrant: not part of the overall MMU role (similar to level)
1889 */
1890 const union kvm_mmu_page_role sync_role_ign = {
1891 .level = 0xf,
1892 .access = 0x7,
1893 .quadrant = 0x3,
1894 .passthrough = 0x1,
1895 };
1896
1897 /*
1898 * Direct pages can never be unsync, and KVM should never attempt to
1899 * sync a shadow page for a different MMU context, e.g. if the role
1900 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
1901 * reserved bits checks will be wrong, etc...
1902 */
1903 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1904 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
1905 return false;
1906
1907 return true;
1908 }
1909
1910 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
1911 {
1912 /* sp->spt[i] still has its initial value from shadow page table allocation. */
1913 if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
1914 return 0;
1915
1916 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1917 }
1918
1919 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1920 {
1921 int flush = 0;
1922 int i;
1923
1924 if (!kvm_sync_page_check(vcpu, sp))
1925 return -1;
1926
1927 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1928 int ret = kvm_sync_spte(vcpu, sp, i);
1929
1930 if (ret < -1)
1931 return -1;
1932 flush |= ret;
1933 }
1934
1935 /*
1936 * Note, any flush is purely for KVM's correctness, e.g. when dropping
1937 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
1938 * unmap or dirty logging event doesn't fail to flush. The guest is
1939 * responsible for flushing the TLB to ensure any changes in protection
1940 * bits are recognized, i.e. until the guest flushes or page faults on
1941 * a relevant address, KVM is architecturally allowed to let vCPUs use
1942 * cached translations with the old protection bits.
1943 */
1944 return flush;
1945 }
1946
1947 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1948 struct list_head *invalid_list)
1949 {
1950 int ret = __kvm_sync_page(vcpu, sp);
1951
1952 if (ret < 0)
1953 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1954 return ret;
1955 }
1956
1957 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1958 struct list_head *invalid_list,
1959 bool remote_flush)
1960 {
1961 if (!remote_flush && list_empty(invalid_list))
1962 return false;
1963
1964 if (!list_empty(invalid_list))
1965 kvm_mmu_commit_zap_page(kvm, invalid_list);
1966 else
1967 kvm_flush_remote_tlbs(kvm);
1968 return true;
1969 }
1970
1971 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1972 {
1973 if (sp->role.invalid)
1974 return true;
1975
1976 /* TDP MMU pages do not use the MMU generation. */
1977 return !is_tdp_mmu_page(sp) &&
1978 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1979 }
1980
1981 struct mmu_page_path {
1982 struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1983 unsigned int idx[PT64_ROOT_MAX_LEVEL];
1984 };
1985
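/*
 * for_each_sp() visits every shadow page collected in a kvm_mmu_pages vector,
 * while mmu_pages_first()/mmu_pages_next() maintain the chain of ancestors in
 * mmu_page_path so that mmu_pages_clear_parents() can clear the unsync child
 * bits bottom-up once a page has been processed.
 */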
1986 #define for_each_sp(pvec, sp, parents, i) \
1987 for (i = mmu_pages_first(&pvec, &parents); \
1988 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1989 i = mmu_pages_next(&pvec, &parents, i))
1990
1991 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1992 struct mmu_page_path *parents,
1993 int i)
1994 {
1995 int n;
1996
1997 for (n = i+1; n < pvec->nr; n++) {
1998 struct kvm_mmu_page *sp = pvec->page[n].sp;
1999 unsigned idx = pvec->page[n].idx;
2000 int level = sp->role.level;
2001
2002 parents->idx[level-1] = idx;
2003 if (level == PG_LEVEL_4K)
2004 break;
2005
2006 parents->parent[level-2] = sp;
2007 }
2008
2009 return n;
2010 }
2011
2012 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2013 struct mmu_page_path *parents)
2014 {
2015 struct kvm_mmu_page *sp;
2016 int level;
2017
2018 if (pvec->nr == 0)
2019 return 0;
2020
2021 WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
2022
2023 sp = pvec->page[0].sp;
2024 level = sp->role.level;
2025 WARN_ON_ONCE(level == PG_LEVEL_4K);
2026
2027 parents->parent[level-2] = sp;
2028
2029 /* Also set up a sentinel. Further entries in pvec are all
2030 * children of sp, so this element is never overwritten.
2031 */
2032 parents->parent[level-1] = NULL;
2033 return mmu_pages_next(pvec, parents, 0);
2034 }
2035
2036 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2037 {
2038 struct kvm_mmu_page *sp;
2039 unsigned int level = 0;
2040
2041 do {
2042 unsigned int idx = parents->idx[level];
2043 sp = parents->parent[level];
2044 if (!sp)
2045 return;
2046
2047 WARN_ON_ONCE(idx == INVALID_INDEX);
2048 clear_unsync_child_bit(sp, idx);
2049 level++;
2050 } while (!sp->unsync_children);
2051 }
2052
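/*
 * Sync all unsync descendants of @parent: write-protect their gfns, bring
 * each page's SPTEs back in line with the guest PTEs via kvm_sync_page(),
 * and flush TLBs as needed.  May yield when mmu_lock is contended or a
 * reschedule is due, or bail out with -EINTR if @can_yield is false.
 */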
2053 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2054 struct kvm_mmu_page *parent, bool can_yield)
2055 {
2056 int i;
2057 struct kvm_mmu_page *sp;
2058 struct mmu_page_path parents;
2059 struct kvm_mmu_pages pages;
2060 LIST_HEAD(invalid_list);
2061 bool flush = false;
2062
2063 while (mmu_unsync_walk(parent, &pages)) {
2064 bool protected = false;
2065
2066 for_each_sp(pages, sp, parents, i)
2067 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2068
2069 if (protected) {
2070 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2071 flush = false;
2072 }
2073
2074 for_each_sp(pages, sp, parents, i) {
2075 kvm_unlink_unsync_page(vcpu->kvm, sp);
2076 flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2077 mmu_pages_clear_parents(&parents);
2078 }
2079 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2080 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2081 if (!can_yield) {
2082 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2083 return -EINTR;
2084 }
2085
2086 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2087 flush = false;
2088 }
2089 }
2090
2091 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2092 return 0;
2093 }
2094
2095 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2096 {
2097 atomic_set(&sp->write_flooding_count, 0);
2098 }
2099
2100 static void clear_sp_write_flooding_count(u64 *spte)
2101 {
2102 __clear_sp_write_flooding_count(sptep_to_sp(spte));
2103 }
2104
2105 /*
2106 * The vCPU is required when finding indirect shadow pages; the shadow
2107 * page may already exist and syncing it needs the vCPU pointer in
2108 * order to read guest page tables. Direct shadow pages are never
2109 * unsync, thus @vcpu can be NULL if @role.direct is true.
2110 */
2111 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2112 struct kvm_vcpu *vcpu,
2113 gfn_t gfn,
2114 struct hlist_head *sp_list,
2115 union kvm_mmu_page_role role)
2116 {
2117 struct kvm_mmu_page *sp;
2118 int ret;
2119 int collisions = 0;
2120 LIST_HEAD(invalid_list);
2121
2122 for_each_valid_sp(kvm, sp, sp_list) {
2123 if (sp->gfn != gfn) {
2124 collisions++;
2125 continue;
2126 }
2127
2128 if (sp->role.word != role.word) {
2129 /*
2130 * If the guest is creating an upper-level page, zap
2131 * unsync pages for the same gfn. While it's possible
2132 * the guest is using recursive page tables, in all
2133 * likelihood the guest has stopped using the unsync
2134 * page and is installing a completely unrelated page.
2135 * Unsync pages must not be left as is, because the new
2136 * upper-level page will be write-protected.
2137 */
2138 if (role.level > PG_LEVEL_4K && sp->unsync)
2139 kvm_mmu_prepare_zap_page(kvm, sp,
2140 &invalid_list);
2141 continue;
2142 }
2143
2144 /* unsync and write-flooding only apply to indirect SPs. */
2145 if (sp->role.direct)
2146 goto out;
2147
2148 if (sp->unsync) {
2149 if (KVM_BUG_ON(!vcpu, kvm))
2150 break;
2151
2152 /*
2153 * The page is good, but is stale. kvm_sync_page does
2154 * get the latest guest state, but (unlike mmu_sync_children)
2155 * it doesn't write-protect the page or mark it synchronized!
2156 * This way the validity of the mapping is ensured, but the
2157 * overhead of write protection is not incurred until the
2158 * guest invalidates the TLB mapping. This allows multiple
2159 * SPs for a single gfn to be unsync.
2160 *
2161 * If the sync fails, the page is zapped. If so, break
2162 * in order to rebuild it.
2163 */
2164 ret = kvm_sync_page(vcpu, sp, &invalid_list);
2165 if (ret < 0)
2166 break;
2167
2168 WARN_ON_ONCE(!list_empty(&invalid_list));
2169 if (ret > 0)
2170 kvm_flush_remote_tlbs(kvm);
2171 }
2172
2173 __clear_sp_write_flooding_count(sp);
2174
2175 goto out;
2176 }
2177
2178 sp = NULL;
2179 ++kvm->stat.mmu_cache_miss;
2180
2181 out:
2182 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2183
2184 if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2185 kvm->stat.max_mmu_page_hash_collisions = collisions;
2186 return sp;
2187 }
2188
2189 /* Caches used when allocating a new shadow page. */
2190 struct shadow_page_caches {
2191 struct kvm_mmu_memory_cache *page_header_cache;
2192 struct kvm_mmu_memory_cache *shadow_page_cache;
2193 struct kvm_mmu_memory_cache *shadowed_info_cache;
2194 };
2195
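/*
 * Allocate a new shadow page from @caches, add it to the head of the
 * active_mmu_pages list and to the per-gfn hash bucket @sp_list, and account
 * it.  The caller must have already verified that no matching page exists.
 */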
2196 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2197 struct shadow_page_caches *caches,
2198 gfn_t gfn,
2199 struct hlist_head *sp_list,
2200 union kvm_mmu_page_role role)
2201 {
2202 struct kvm_mmu_page *sp;
2203
2204 sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2205 sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2206 if (!role.direct)
2207 sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2208
2209 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2210
2211 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2212
2213 /*
2214 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2215 * depends on valid pages being added to the head of the list. See
2216 * comments in kvm_zap_obsolete_pages().
2217 */
2218 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2219 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2220 kvm_account_mmu_page(kvm, sp);
2221
2222 sp->gfn = gfn;
2223 sp->role = role;
2224 hlist_add_head(&sp->hash_link, sp_list);
2225 if (sp_has_gptes(sp))
2226 account_shadowed(kvm, sp);
2227
2228 return sp;
2229 }
2230
2231 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2232 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2233 struct kvm_vcpu *vcpu,
2234 struct shadow_page_caches *caches,
2235 gfn_t gfn,
2236 union kvm_mmu_page_role role)
2237 {
2238 struct hlist_head *sp_list;
2239 struct kvm_mmu_page *sp;
2240 bool created = false;
2241
2242 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2243
2244 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2245 if (!sp) {
2246 created = true;
2247 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2248 }
2249
2250 trace_kvm_mmu_get_page(sp, created);
2251 return sp;
2252 }
2253
2254 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2255 gfn_t gfn,
2256 union kvm_mmu_page_role role)
2257 {
2258 struct shadow_page_caches caches = {
2259 .page_header_cache = &vcpu->arch.mmu_page_header_cache,
2260 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2261 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2262 };
2263
2264 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2265 }
2266
2267 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2268 unsigned int access)
2269 {
2270 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2271 union kvm_mmu_page_role role;
2272
2273 role = parent_sp->role;
2274 role.level--;
2275 role.access = access;
2276 role.direct = direct;
2277 role.passthrough = 0;
2278
2279 /*
2280 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2281 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2282 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2283 * shadow each guest page table with multiple shadow page tables, which
2284 * requires extra bookkeeping in the role.
2285 *
2286 * Specifically, to shadow the guest's page directory (which covers a
2287 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2288 * 1GiB of the address space. @role.quadrant encodes which quarter of
2289 * the address space each maps.
2290 *
2291 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2292 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2293 * @role.quadrant encodes which half of the region they map.
2294 *
2295 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2296 * consumes bits 29:21. To consume bits 31:30, KVM uses 4 shadow
2297 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2298 * quadrant is assigned in mmu_alloc_root(). A 4-byte PTE consumes
2299 * bits 21:12, while an 8-byte PTE consumes bits 20:12. To consume
2300 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2301 * quadrant, i.e. sets quadrant to '0' or '1'. The parent 8-byte PDE
2302 * covers bit 21 (see above), thus the quadrant is calculated from the
2303 * _least_ significant bit of the PDE index.
2304 */
2305 if (role.has_4_byte_gpte) {
2306 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2307 role.quadrant = spte_index(sptep) & 1;
2308 }
2309
2310 return role;
2311 }
2312
2313 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2314 u64 *sptep, gfn_t gfn,
2315 bool direct, unsigned int access)
2316 {
2317 union kvm_mmu_page_role role;
2318
2319 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2320 return ERR_PTR(-EEXIST);
2321
2322 role = kvm_mmu_child_role(sptep, direct, access);
2323 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2324 }
2325
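/*
 * Initialize a shadow page table walk for @addr starting at @root.  When the
 * guest uses 32-bit paging (2-level or PAE) but KVM's shadow root is 4- or
 * 5-level, the walk instead starts from the pre-allocated PAE page directory
 * selected by bits 31:30 of @addr.
 */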
2326 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2327 struct kvm_vcpu *vcpu, hpa_t root,
2328 u64 addr)
2329 {
2330 iterator->addr = addr;
2331 iterator->shadow_addr = root;
2332 iterator->level = vcpu->arch.mmu->root_role.level;
2333
2334 if (iterator->level >= PT64_ROOT_4LEVEL &&
2335 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2336 !vcpu->arch.mmu->root_role.direct)
2337 iterator->level = PT32E_ROOT_LEVEL;
2338
2339 if (iterator->level == PT32E_ROOT_LEVEL) {
2340 /*
2341 * prev_root is currently only used for 64-bit hosts. So only
2342 * the active root_hpa is valid here.
2343 */
2344 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2345
2346 iterator->shadow_addr
2347 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2348 iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2349 --iterator->level;
2350 if (!iterator->shadow_addr)
2351 iterator->level = 0;
2352 }
2353 }
2354
2355 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2356 struct kvm_vcpu *vcpu, u64 addr)
2357 {
2358 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2359 addr);
2360 }
2361
2362 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2363 {
2364 if (iterator->level < PG_LEVEL_4K)
2365 return false;
2366
2367 iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2368 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2369 return true;
2370 }
2371
2372 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2373 u64 spte)
2374 {
2375 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2376 iterator->level = 0;
2377 return;
2378 }
2379
2380 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2381 --iterator->level;
2382 }
2383
2384 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2385 {
2386 __shadow_walk_next(iterator, *iterator->sptep);
2387 }
2388
2389 static void __link_shadow_page(struct kvm *kvm,
2390 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2391 struct kvm_mmu_page *sp, bool flush)
2392 {
2393 u64 spte;
2394
2395 BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2396
2397 /*
2398 * If an SPTE is present already, it must be a leaf and therefore
2399 * a large one. Drop it, and flush the TLB if needed, before
2400 * installing sp.
2401 */
2402 if (is_shadow_present_pte(*sptep))
2403 drop_large_spte(kvm, sptep, flush);
2404
2405 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2406
2407 mmu_spte_set(sptep, spte);
2408
2409 mmu_page_add_parent_pte(cache, sp, sptep);
2410
2411 /*
2412 * The non-direct sub-pagetable must be updated before linking. For
2413 * L1 sp, the pagetable is updated via kvm_sync_page() in
2414 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2415 * so sp->unsync can be true or false. For higher level non-direct
2416 * sp, the pagetable is updated/synced via mmu_sync_children() in
2417 * FNAME(fetch)(), so sp->unsync_children can only be false.
2418 * WARN_ON_ONCE() if anything happens unexpectedly.
2419 */
2420 if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2421 mark_unsync(sptep);
2422 }
2423
2424 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2425 struct kvm_mmu_page *sp)
2426 {
2427 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2428 }
2429
2430 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2431 unsigned direct_access)
2432 {
2433 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2434 struct kvm_mmu_page *child;
2435
2436 /*
2437 * For the direct sp, if the guest pte's dirty bit
2438 * changed from clean to dirty, it will corrupt the
2439 * sp's access: allow writable in the read-only sp,
2440 * so we should update the spte at this point to get
2441 * a new sp with the correct access.
2442 */
2443 child = spte_to_child_sp(*sptep);
2444 if (child->role.access == direct_access)
2445 return;
2446
2447 drop_parent_pte(vcpu->kvm, child, sptep);
2448 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2449 }
2450 }
2451
2452 /* Returns the number of zapped non-leaf child shadow pages. */
2453 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2454 u64 *spte, struct list_head *invalid_list)
2455 {
2456 u64 pte;
2457 struct kvm_mmu_page *child;
2458
2459 pte = *spte;
2460 if (is_shadow_present_pte(pte)) {
2461 if (is_last_spte(pte, sp->role.level)) {
2462 drop_spte(kvm, spte);
2463 } else {
2464 child = spte_to_child_sp(pte);
2465 drop_parent_pte(kvm, child, spte);
2466
2467 /*
2468 * Recursively zap nested TDP SPs, parentless SPs are
2469 * unlikely to be used again in the near future. This
2470 * avoids retaining a large number of stale nested SPs.
2471 */
2472 if (tdp_enabled && invalid_list &&
2473 child->role.guest_mode && !child->parent_ptes.val)
2474 return kvm_mmu_prepare_zap_page(kvm, child,
2475 invalid_list);
2476 }
2477 } else if (is_mmio_spte(kvm, pte)) {
2478 mmu_spte_clear_no_track(spte);
2479 }
2480 return 0;
2481 }
2482
2483 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2484 struct kvm_mmu_page *sp,
2485 struct list_head *invalid_list)
2486 {
2487 int zapped = 0;
2488 unsigned i;
2489
2490 for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2491 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2492
2493 return zapped;
2494 }
2495
2496 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2497 {
2498 u64 *sptep;
2499 struct rmap_iterator iter;
2500
2501 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2502 drop_parent_pte(kvm, sp, sptep);
2503 }
2504
2505 static int mmu_zap_unsync_children(struct kvm *kvm,
2506 struct kvm_mmu_page *parent,
2507 struct list_head *invalid_list)
2508 {
2509 int i, zapped = 0;
2510 struct mmu_page_path parents;
2511 struct kvm_mmu_pages pages;
2512
2513 if (parent->role.level == PG_LEVEL_4K)
2514 return 0;
2515
2516 while (mmu_unsync_walk(parent, &pages)) {
2517 struct kvm_mmu_page *sp;
2518
2519 for_each_sp(pages, sp, parents, i) {
2520 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2521 mmu_pages_clear_parents(&parents);
2522 zapped++;
2523 }
2524 }
2525
2526 return zapped;
2527 }
2528
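/*
 * First half of the two-phase zap: disconnect @sp from the paging structure
 * (zap its unsync children, its own SPTEs and its parent SPTEs), mark it
 * invalid and queue it on @invalid_list.  The page is freed only by
 * kvm_mmu_commit_zap_page() after remote TLBs have been flushed.  Returns
 * whether the active_mmu_pages list was perturbed while zapping children.
 */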
2529 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2530 struct kvm_mmu_page *sp,
2531 struct list_head *invalid_list,
2532 int *nr_zapped)
2533 {
2534 bool list_unstable, zapped_root = false;
2535
2536 lockdep_assert_held_write(&kvm->mmu_lock);
2537 trace_kvm_mmu_prepare_zap_page(sp);
2538 ++kvm->stat.mmu_shadow_zapped;
2539 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2540 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2541 kvm_mmu_unlink_parents(kvm, sp);
2542
2543 /* Zapping children means active_mmu_pages has become unstable. */
2544 list_unstable = *nr_zapped;
2545
2546 if (!sp->role.invalid && sp_has_gptes(sp))
2547 unaccount_shadowed(kvm, sp);
2548
2549 if (sp->unsync)
2550 kvm_unlink_unsync_page(kvm, sp);
2551 if (!sp->root_count) {
2552 /* Count self */
2553 (*nr_zapped)++;
2554
2555 /*
2556 * Already invalid pages (previously active roots) are not on
2557 * the active page list. See list_del() in the "else" case of
2558 * !sp->root_count.
2559 */
2560 if (sp->role.invalid)
2561 list_add(&sp->link, invalid_list);
2562 else
2563 list_move(&sp->link, invalid_list);
2564 kvm_unaccount_mmu_page(kvm, sp);
2565 } else {
2566 /*
2567 * Remove the active root from the active page list, the root
2568 * will be explicitly freed when the root_count hits zero.
2569 */
2570 list_del(&sp->link);
2571
2572 /*
2573 * Obsolete pages cannot be used on any vCPUs, see the comment
2574 * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also
2575 * treats invalid shadow pages as being obsolete.
2576 */
2577 zapped_root = !is_obsolete_sp(kvm, sp);
2578 }
2579
2580 if (sp->nx_huge_page_disallowed)
2581 unaccount_nx_huge_page(kvm, sp);
2582
2583 sp->role.invalid = 1;
2584
2585 /*
2586 * Make the request to free obsolete roots after marking the root
2587 * invalid, otherwise other vCPUs may not see it as invalid.
2588 */
2589 if (zapped_root)
2590 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2591 return list_unstable;
2592 }
2593
2594 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2595 struct list_head *invalid_list)
2596 {
2597 int nr_zapped;
2598
2599 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2600 return nr_zapped;
2601 }
2602
2603 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2604 struct list_head *invalid_list)
2605 {
2606 struct kvm_mmu_page *sp, *nsp;
2607
2608 if (list_empty(invalid_list))
2609 return;
2610
2611 /*
2612 * We need to make sure everyone sees our modifications to
2613 * the page tables and that we see changes to vcpu->mode here. The barrier
2614 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2615 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2616 *
2617 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2618 * guest mode and/or lockless shadow page table walks.
2619 */
2620 kvm_flush_remote_tlbs(kvm);
2621
2622 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2623 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2624 kvm_mmu_free_shadow_page(sp);
2625 }
2626 }
2627
2628 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2629 unsigned long nr_to_zap)
2630 {
2631 unsigned long total_zapped = 0;
2632 struct kvm_mmu_page *sp, *tmp;
2633 LIST_HEAD(invalid_list);
2634 bool unstable;
2635 int nr_zapped;
2636
2637 if (list_empty(&kvm->arch.active_mmu_pages))
2638 return 0;
2639
2640 restart:
2641 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2642 /*
2643 * Don't zap active root pages, the page itself can't be freed
2644 * and zapping it will just force vCPUs to realloc and reload.
2645 */
2646 if (sp->root_count)
2647 continue;
2648
2649 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2650 &nr_zapped);
2651 total_zapped += nr_zapped;
2652 if (total_zapped >= nr_to_zap)
2653 break;
2654
2655 if (unstable)
2656 goto restart;
2657 }
2658
2659 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2660
2661 kvm->stat.mmu_recycled += total_zapped;
2662 return total_zapped;
2663 }
2664
2665 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2666 {
2667 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2668 return kvm->arch.n_max_mmu_pages -
2669 kvm->arch.n_used_mmu_pages;
2670
2671 return 0;
2672 }
2673
2674 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2675 {
2676 unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2677
2678 if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2679 return 0;
2680
2681 kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2682
2683 /*
2684 * Note, this check is intentionally soft, it only guarantees that one
2685 * page is available, while the caller may end up allocating as many as
2686 * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
2687 * exceeding the (arbitrary by default) limit will not harm the host,
2688 * being too aggressive may unnecessarily kill the guest, and getting an
2689 * exact count is far more trouble than it's worth, especially in the
2690 * page fault paths.
2691 */
2692 if (!kvm_mmu_available_pages(vcpu->kvm))
2693 return -ENOSPC;
2694 return 0;
2695 }
2696
2697 /*
2698 * Changing the number of mmu pages allocated to the vm.
2699 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2700 */
2701 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2702 {
2703 write_lock(&kvm->mmu_lock);
2704
2705 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2706 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2707 goal_nr_mmu_pages);
2708
2709 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2710 }
2711
2712 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2713
2714 write_unlock(&kvm->mmu_lock);
2715 }
2716
2717 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2718 {
2719 struct kvm_mmu_page *sp;
2720 LIST_HEAD(invalid_list);
2721 int r;
2722
2723 r = 0;
2724 write_lock(&kvm->mmu_lock);
2725 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2726 r = 1;
2727 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2728 }
2729 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2730 write_unlock(&kvm->mmu_lock);
2731
2732 return r;
2733 }
2734
2735 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2736 {
2737 gpa_t gpa;
2738 int r;
2739
2740 if (vcpu->arch.mmu->root_role.direct)
2741 return 0;
2742
2743 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2744
2745 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2746
2747 return r;
2748 }
2749
2750 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2751 {
2752 trace_kvm_mmu_unsync_page(sp);
2753 ++kvm->stat.mmu_unsync;
2754 sp->unsync = 1;
2755
2756 kvm_mmu_mark_parents_unsync(sp);
2757 }
2758
2759 /*
2760 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2761 * gfn, for which KVM is creating a writable mapping. Returns 0 if all pages
2762 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2763 * be write-protected.
2764 */
2765 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2766 gfn_t gfn, bool can_unsync, bool prefetch)
2767 {
2768 struct kvm_mmu_page *sp;
2769 bool locked = false;
2770
2771 /*
2772 * Force write-protection if the page is being tracked. Note, the page
2773 * track machinery is used to write-protect upper-level shadow pages,
2774 * i.e. this guards the role.level == 4K assertion below!
2775 */
2776 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2777 return -EPERM;
2778
2779 /*
2780 * The page is not write-tracked, mark existing shadow pages unsync
2781 * unless KVM is synchronizing an unsync SP (can_unsync = false). In
2782 * that case, KVM must complete emulation of the guest TLB flush before
2783 * allowing shadow pages to become unsync (writable by the guest).
2784 */
2785 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2786 if (!can_unsync)
2787 return -EPERM;
2788
2789 if (sp->unsync)
2790 continue;
2791
2792 if (prefetch)
2793 return -EEXIST;
2794
2795 /*
2796 * TDP MMU page faults require an additional spinlock as they
2797 * run with mmu_lock held for read, not write, and the unsync
2798 * logic is not thread-safe. Take the spinlock regardless of
2799 * the MMU type to avoid extra conditionals/parameters, there's
2800 * no meaningful penalty if mmu_lock is held for write.
2801 */
2802 if (!locked) {
2803 locked = true;
2804 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2805
2806 /*
2807 * Recheck after taking the spinlock, a different vCPU
2808 * may have since marked the page unsync. A false
2809 * negative on the unprotected check above is not
2810 * possible as clearing sp->unsync _must_ hold mmu_lock
2811 * for write, i.e. unsync cannot transition from 1->0
2812 * while this CPU holds mmu_lock for read (or write).
2813 */
2814 if (READ_ONCE(sp->unsync))
2815 continue;
2816 }
2817
2818 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2819 kvm_unsync_page(kvm, sp);
2820 }
2821 if (locked)
2822 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2823
2824 /*
2825 * We need to ensure that the marking of unsync pages is visible
2826 * before the SPTE is updated to allow writes because
2827 * kvm_mmu_sync_roots() checks the unsync flags without holding
2828 * the MMU lock and so can race with this. If the SPTE was updated
2829 * before the page had been marked as unsync-ed, something like the
2830 * following could happen:
2831 *
2832 * CPU 1 CPU 2
2833 * ---------------------------------------------------------------------
2834 * 1.2 Host updates SPTE
2835 * to be writable
2836 * 2.1 Guest writes a GPTE for GVA X.
2837 * (GPTE being in the guest page table shadowed
2838 * by the SP from CPU 1.)
2839 * This reads SPTE during the page table walk.
2840 * Since SPTE.W is read as 1, there is no
2841 * fault.
2842 *
2843 * 2.2 Guest issues TLB flush.
2844 * That causes a VM Exit.
2845 *
2846 * 2.3 Walking of unsync pages sees sp->unsync is
2847 * false and skips the page.
2848 *
2849 * 2.4 Guest accesses GVA X.
2850 * Since the mapping in the SP was not updated,
2851 * the old mapping for GVA X incorrectly
2852 * gets used.
2853 * 1.1 Host marks SP
2854 * as unsync
2855 * (sp->unsync = true)
2856 *
2857 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2858 * the situation in 2.4 does not arise. It pairs with the read barrier
2859 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2860 */
2861 smp_wmb();
2862
2863 return 0;
2864 }
2865
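/*
 * Create or update the leaf SPTE at @sptep to map @gfn to @pfn with
 * @pte_access.  Returns a RET_PF_* value: RET_PF_FIXED on success,
 * RET_PF_SPURIOUS if the desired SPTE was already present, and
 * RET_PF_EMULATE for MMIO accesses or write faults that must stay
 * write-protected and thus be emulated.
 */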
2866 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2867 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2868 kvm_pfn_t pfn, struct kvm_page_fault *fault)
2869 {
2870 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2871 int level = sp->role.level;
2872 int was_rmapped = 0;
2873 int ret = RET_PF_FIXED;
2874 bool flush = false;
2875 bool wrprot;
2876 u64 spte;
2877
2878 /* Prefetching always gets a writable pfn. */
2879 bool host_writable = !fault || fault->map_writable;
2880 bool prefetch = !fault || fault->prefetch;
2881 bool write_fault = fault && fault->write;
2882
2883 if (unlikely(is_noslot_pfn(pfn))) {
2884 vcpu->stat.pf_mmio_spte_created++;
2885 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2886 return RET_PF_EMULATE;
2887 }
2888
2889 if (is_shadow_present_pte(*sptep)) {
2890 /*
2891 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2892 * the parent of the now unreachable PTE.
2893 */
2894 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2895 struct kvm_mmu_page *child;
2896 u64 pte = *sptep;
2897
2898 child = spte_to_child_sp(pte);
2899 drop_parent_pte(vcpu->kvm, child, sptep);
2900 flush = true;
2901 } else if (pfn != spte_to_pfn(*sptep)) {
2902 drop_spte(vcpu->kvm, sptep);
2903 flush = true;
2904 } else
2905 was_rmapped = 1;
2906 }
2907
2908 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2909 true, host_writable, &spte);
2910
2911 if (*sptep == spte) {
2912 ret = RET_PF_SPURIOUS;
2913 } else {
2914 flush |= mmu_spte_update(sptep, spte);
2915 trace_kvm_mmu_set_spte(level, gfn, sptep);
2916 }
2917
2918 if (wrprot) {
2919 if (write_fault)
2920 ret = RET_PF_EMULATE;
2921 }
2922
2923 if (flush)
2924 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
2925
2926 if (!was_rmapped) {
2927 WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2928 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2929 } else {
2930 /* Already rmapped but the pte_access bits may have changed. */
2931 kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
2932 }
2933
2934 return ret;
2935 }
2936
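/*
 * Prefetch translations for the contiguous range of SPTEs [@start, @end):
 * grab the backing pages in one batch and install SPTEs for them, so that
 * neighbouring accesses don't each have to take a page fault.
 */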
2937 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2938 struct kvm_mmu_page *sp,
2939 u64 *start, u64 *end)
2940 {
2941 struct page *pages[PTE_PREFETCH_NUM];
2942 struct kvm_memory_slot *slot;
2943 unsigned int access = sp->role.access;
2944 int i, ret;
2945 gfn_t gfn;
2946
2947 gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
2948 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2949 if (!slot)
2950 return -1;
2951
2952 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2953 if (ret <= 0)
2954 return -1;
2955
2956 for (i = 0; i < ret; i++, gfn++, start++) {
2957 mmu_set_spte(vcpu, slot, start, access, gfn,
2958 page_to_pfn(pages[i]), NULL);
2959 put_page(pages[i]);
2960 }
2961
2962 return 0;
2963 }
2964
2965 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2966 struct kvm_mmu_page *sp, u64 *sptep)
2967 {
2968 u64 *spte, *start = NULL;
2969 int i;
2970
2971 WARN_ON_ONCE(!sp->role.direct);
2972
2973 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
2974 spte = sp->spt + i;
2975
2976 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2977 if (is_shadow_present_pte(*spte) || spte == sptep) {
2978 if (!start)
2979 continue;
2980 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2981 return;
2982 start = NULL;
2983 } else if (!start)
2984 start = spte;
2985 }
2986 if (start)
2987 direct_pte_prefetch_many(vcpu, sp, start, spte);
2988 }
2989
2990 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2991 {
2992 struct kvm_mmu_page *sp;
2993
2994 sp = sptep_to_sp(sptep);
2995
2996 /*
2997 * Without accessed bits, there's no way to distinguish between
2998 * actually accessed translations and prefetched, so disable pte
2999 * prefetch if accessed bits aren't available.
3000 */
3001 if (sp_ad_disabled(sp))
3002 return;
3003
3004 if (sp->role.level > PG_LEVEL_4K)
3005 return;
3006
3007 /*
3008 * If addresses are being invalidated, skip prefetching to avoid
3009 * accidentally prefetching those addresses.
3010 */
3011 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3012 return;
3013
3014 __direct_pte_prefetch(vcpu, sp, sptep);
3015 }
3016
3017 /*
3018 * Lookup the mapping level for @gfn in the current mm.
3019 *
3020 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
3021 * consumer to be tied into KVM's handlers for MMU notifier events!
3022 *
3023 * There are several ways to safely use this helper:
3024 *
3025 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3026 * consuming it. In this case, mmu_lock doesn't need to be held during the
3027 * lookup, but it does need to be held while checking the MMU notifier.
3028 *
3029 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3030 * event for the hva. This can be done by explicitly checking the MMU notifier
3031 * or by ensuring that KVM already has a valid mapping that covers the hva.
3032 *
3033 * - Do not use the result to install new mappings, e.g. use the host mapping
3034 * level only to decide whether or not to zap an entry. In this case, it's
3035 * not required to hold mmu_lock (though it's highly likely the caller will
3036 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
3037 *
3038 * Note! The lookup can still race with modifications to host page tables, but
3039 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3040 * race with the primary MMU occurs.
3041 */
3042 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3043 const struct kvm_memory_slot *slot)
3044 {
3045 int level = PG_LEVEL_4K;
3046 unsigned long hva;
3047 unsigned long flags;
3048 pgd_t pgd;
3049 p4d_t p4d;
3050 pud_t pud;
3051 pmd_t pmd;
3052
3053 /*
3054 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3055 * is not solely for performance, it's also necessary to avoid the
3056 * "writable" check in __gfn_to_hva_many(), which will always fail on
3057 * read-only memslots due to gfn_to_hva() assuming writes. Earlier
3058 * page fault steps have already verified the guest isn't writing a
3059 * read-only memslot.
3060 */
3061 hva = __gfn_to_hva_memslot(slot, gfn);
3062
3063 /*
3064 * Disable IRQs to prevent concurrent tear down of host page tables,
3065 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3066 * the original page table.
3067 */
3068 local_irq_save(flags);
3069
3070 /*
3071 * Read each entry once. As above, a non-leaf entry can be promoted to
3072 * a huge page _during_ this walk. Re-reading the entry could send the
3073 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
3074 * value) and then p*d_offset() walks into the target huge page instead
3075 * of the old page table (sees the new value).
3076 */
3077 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3078 if (pgd_none(pgd))
3079 goto out;
3080
3081 p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3082 if (p4d_none(p4d) || !p4d_present(p4d))
3083 goto out;
3084
3085 pud = READ_ONCE(*pud_offset(&p4d, hva));
3086 if (pud_none(pud) || !pud_present(pud))
3087 goto out;
3088
3089 if (pud_leaf(pud)) {
3090 level = PG_LEVEL_1G;
3091 goto out;
3092 }
3093
3094 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3095 if (pmd_none(pmd) || !pmd_present(pmd))
3096 goto out;
3097
3098 if (pmd_leaf(pmd))
3099 level = PG_LEVEL_2M;
3100
3101 out:
3102 local_irq_restore(flags);
3103 return level;
3104 }
3105
3106 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
3107 const struct kvm_memory_slot *slot,
3108 gfn_t gfn, int max_level, bool is_private)
3109 {
3110 struct kvm_lpage_info *linfo;
3111 int host_level;
3112
3113 max_level = min(max_level, max_huge_page_level);
3114 for ( ; max_level > PG_LEVEL_4K; max_level--) {
3115 linfo = lpage_info_slot(gfn, slot, max_level);
3116 if (!linfo->disallow_lpage)
3117 break;
3118 }
3119
3120 if (is_private)
3121 return max_level;
3122
3123 if (max_level == PG_LEVEL_4K)
3124 return PG_LEVEL_4K;
3125
3126 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3127 return min(host_level, max_level);
3128 }
3129
3130 int kvm_mmu_max_mapping_level(struct kvm *kvm,
3131 const struct kvm_memory_slot *slot, gfn_t gfn,
3132 int max_level)
3133 {
3134 bool is_private = kvm_slot_can_be_private(slot) &&
3135 kvm_mem_is_private(kvm, gfn);
3136
3137 return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
3138 }
3139
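/*
 * Pick the level at which @fault will be mapped: clamp the requested level
 * by the memslot's lpage_info tracking, dirty logging and the host mapping
 * level, and keep the 4K default when the NX huge page workaround forbids
 * creating an executable huge page.  Also align fault->pfn to the chosen
 * level.
 */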
3140 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3141 {
3142 struct kvm_memory_slot *slot = fault->slot;
3143 kvm_pfn_t mask;
3144
3145 fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3146
3147 if (unlikely(fault->max_level == PG_LEVEL_4K))
3148 return;
3149
3150 if (is_error_noslot_pfn(fault->pfn))
3151 return;
3152
3153 if (kvm_slot_dirty_track_enabled(slot))
3154 return;
3155
3156 /*
3157 * Enforce the iTLB multihit workaround after capturing the requested
3158 * level, which will be used to do precise, accurate accounting.
3159 */
3160 fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
3161 fault->gfn, fault->max_level,
3162 fault->is_private);
3163 if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3164 return;
3165
3166 /*
3167 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3168 * the pmd can't be split from under us.
3169 */
3170 fault->goal_level = fault->req_level;
3171 mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3172 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3173 fault->pfn &= ~mask;
3174 }
3175
3176 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3177 {
3178 if (cur_level > PG_LEVEL_4K &&
3179 cur_level == fault->goal_level &&
3180 is_shadow_present_pte(spte) &&
3181 !is_large_pte(spte) &&
3182 spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3183 /*
3184 * A small SPTE exists for this pfn, but FNAME(fetch),
3185 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3186 * large PTE instead: just force them to go down another level,
3187 * patching the next 9 bits of the address back into the pfn
3188 * for them.
3189 */
3190 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3191 KVM_PAGES_PER_HPAGE(cur_level - 1);
3192 fault->pfn |= fault->gfn & page_mask;
3193 fault->goal_level--;
3194 }
3195 }
3196
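/*
 * Map @fault for a direct MMU: walk down the shadow page tables, allocating
 * and linking intermediate shadow pages as needed, and install the final
 * leaf SPTE at fault->goal_level via mmu_set_spte().
 */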
3197 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3198 {
3199 struct kvm_shadow_walk_iterator it;
3200 struct kvm_mmu_page *sp;
3201 int ret;
3202 gfn_t base_gfn = fault->gfn;
3203
3204 kvm_mmu_hugepage_adjust(vcpu, fault);
3205
3206 trace_kvm_mmu_spte_requested(fault);
3207 for_each_shadow_entry(vcpu, fault->addr, it) {
3208 /*
3209 * We cannot overwrite existing page tables with an NX
3210 * large page, as the leaf could be executable.
3211 */
3212 if (fault->nx_huge_page_workaround_enabled)
3213 disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3214
3215 base_gfn = gfn_round_for_level(fault->gfn, it.level);
3216 if (it.level == fault->goal_level)
3217 break;
3218
3219 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3220 if (sp == ERR_PTR(-EEXIST))
3221 continue;
3222
3223 link_shadow_page(vcpu, it.sptep, sp);
3224 if (fault->huge_page_disallowed)
3225 account_nx_huge_page(vcpu->kvm, sp,
3226 fault->req_level >= it.level);
3227 }
3228
3229 if (WARN_ON_ONCE(it.level != fault->goal_level))
3230 return -EFAULT;
3231
3232 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3233 base_gfn, fault->pfn, fault);
3234 if (ret == RET_PF_SPURIOUS)
3235 return ret;
3236
3237 direct_pte_prefetch(vcpu, it.sptep);
3238 return ret;
3239 }
3240
3241 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3242 {
3243 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3244
3245 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3246 }
3247
3248 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3249 {
3250 if (is_sigpending_pfn(fault->pfn)) {
3251 kvm_handle_signal_exit(vcpu);
3252 return -EINTR;
3253 }
3254
3255 /*
3256 * Do not cache the mmio info caused by writing the readonly gfn
3257 * into the spte, otherwise a read access on the readonly gfn would
3258 * also cause an mmio page fault and be treated as mmio access.
3259 */
3260 if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3261 return RET_PF_EMULATE;
3262
3263 if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3264 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3265 return RET_PF_RETRY;
3266 }
3267
3268 return -EFAULT;
3269 }
3270
3271 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3272 struct kvm_page_fault *fault,
3273 unsigned int access)
3274 {
3275 gva_t gva = fault->is_tdp ? 0 : fault->addr;
3276
3277 if (fault->is_private) {
3278 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3279 return -EFAULT;
3280 }
3281
3282 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3283 access & shadow_mmio_access_mask);
3284
3285 fault->slot = NULL;
3286 fault->pfn = KVM_PFN_NOSLOT;
3287 fault->map_writable = false;
3288 fault->hva = KVM_HVA_ERR_BAD;
3289
3290 /*
3291 * If MMIO caching is disabled, emulate immediately without
3292 * touching the shadow page tables as attempting to install an
3293 * MMIO SPTE will just be an expensive nop.
3294 */
3295 if (unlikely(!enable_mmio_caching))
3296 return RET_PF_EMULATE;
3297
3298 /*
3299 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3300 * any guest that generates such gfns is running nested and is being
3301 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3302 * only if L1's MAXPHYADDR is inaccurate with respect to the
3303 * hardware's).
3304 */
3305 if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3306 return RET_PF_EMULATE;
3307
3308 return RET_PF_CONTINUE;
3309 }
3310
3311 static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
3312 {
3313 /*
3314 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3315 * reach the common page fault handler if the SPTE has an invalid MMIO
3316 * generation number. Refreshing the MMIO generation needs to go down
3317 * the slow path. Note, EPT Misconfigs do NOT set the PRESENT flag!
3318 */
3319 if (fault->rsvd)
3320 return false;
3321
3322 /*
3323 * #PF can be fast if:
3324 *
3325 * 1. The shadow page table entry is not present and A/D bits are
3326 * disabled _by KVM_, which could mean that the fault is potentially
3327 * caused by access tracking (if enabled). If A/D bits are enabled
3328 * by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3329 * bits for L2 and employ access tracking, but the fast page fault
3330 * mechanism only supports direct MMUs.
3331 * 2. The shadow page table entry is present, the access is a write,
3332 * and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3333 * the fault was caused by a write-protection violation. If the
3334 * SPTE is MMU-writable (determined later), the fault can be fixed
3335 * by setting the Writable bit, which can be done out of mmu_lock.
3336 */
3337 if (!fault->present)
3338 return !kvm_ad_enabled();
3339
3340 /*
3341 * Note, instruction fetches and writes are mutually exclusive, ignore
3342 * the "exec" flag.
3343 */
3344 return fault->write;
3345 }
3346
3347 /*
3348 * Returns true if the SPTE was fixed successfully. Otherwise,
3349 * someone else modified the SPTE from its original value.
3350 */
3351 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3352 struct kvm_page_fault *fault,
3353 u64 *sptep, u64 old_spte, u64 new_spte)
3354 {
3355 /*
3356 * Theoretically we could also set dirty bit (and flush TLB) here in
3357 * order to eliminate unnecessary PML logging. See comments in
3358 * set_spte. But fast_page_fault is very unlikely to happen with PML
3359 * enabled, so we do not do this. This might result in the same GPA
3360 * to be logged in PML buffer again when the write really happens, and
3361 * eventually to be called by mark_page_dirty twice. But it's also no
3362 * harm. This also avoids the TLB flush needed after setting dirty bit
3363 * so non-PML cases won't be impacted.
3364 *
3365 * Compare with set_spte where instead shadow_dirty_mask is set.
3366 */
3367 if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3368 return false;
3369
3370 if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3371 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3372
3373 return true;
3374 }
3375
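/*
 * Return true if the access described by @fault would be allowed by @spte,
 * i.e. if retrying the access right now would no longer fault.
 */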
3376 static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3377 {
3378 if (fault->exec)
3379 return is_executable_pte(spte);
3380
3381 if (fault->write)
3382 return is_writable_pte(spte);
3383
3384 /* Fault was on Read access */
3385 return spte & PT_PRESENT_MASK;
3386 }
3387
3388 /*
3389 * Returns the last level spte pointer of the shadow page walk for the given
3390  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3391 * walk could be performed, returns NULL and *spte does not contain valid data.
3392 *
3393 * Contract:
3394 * - Must be called between walk_shadow_page_lockless_{begin,end}.
3395 * - The returned sptep must not be used after walk_shadow_page_lockless_end.
3396 */
3397 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3398 {
3399 struct kvm_shadow_walk_iterator iterator;
3400 u64 old_spte;
3401 u64 *sptep = NULL;
3402
3403 for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3404 sptep = iterator.sptep;
3405 *spte = old_spte;
3406 }
3407
3408 return sptep;
3409 }
3410
3411 /*
3412 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3413 */
3414 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3415 {
3416 struct kvm_mmu_page *sp;
3417 int ret = RET_PF_INVALID;
3418 u64 spte;
3419 u64 *sptep;
3420 uint retry_count = 0;
3421
3422 if (!page_fault_can_be_fast(fault))
3423 return ret;
3424
3425 walk_shadow_page_lockless_begin(vcpu);
3426
3427 do {
3428 u64 new_spte;
3429
3430 if (tdp_mmu_enabled)
3431 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3432 else
3433 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3434
3435 /*
3436 * It's entirely possible for the mapping to have been zapped
3437 * by a different task, but the root page should always be
3438 * available as the vCPU holds a reference to its root(s).
3439 */
3440 if (WARN_ON_ONCE(!sptep))
3441 spte = REMOVED_SPTE;
3442
3443 if (!is_shadow_present_pte(spte))
3444 break;
3445
3446 sp = sptep_to_sp(sptep);
3447 if (!is_last_spte(spte, sp->role.level))
3448 break;
3449
3450 /*
3451 * Check whether the memory access that caused the fault would
3452 * still cause it if it were to be performed right now. If not,
3453 * then this is a spurious fault caused by TLB lazily flushed,
3454 * or some other CPU has already fixed the PTE after the
3455 * current CPU took the fault.
3456 *
3457 * Need not check the access of upper level table entries since
3458 * they are always ACC_ALL.
3459 */
3460 if (is_access_allowed(fault, spte)) {
3461 ret = RET_PF_SPURIOUS;
3462 break;
3463 }
3464
3465 new_spte = spte;
3466
3467 /*
3468 * KVM only supports fixing page faults outside of MMU lock for
3469 * direct MMUs, nested MMUs are always indirect, and KVM always
3470 * uses A/D bits for non-nested MMUs. Thus, if A/D bits are
3471 * enabled, the SPTE can't be an access-tracked SPTE.
3472 */
3473 if (unlikely(!kvm_ad_enabled()) && is_access_track_spte(spte))
3474 new_spte = restore_acc_track_spte(new_spte);
3475
3476 /*
3477 * To keep things simple, only SPTEs that are MMU-writable can
3478 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3479 * that were write-protected for dirty-logging or access
3480 * tracking are handled here. Don't bother checking if the
3481 * SPTE is writable to prioritize running with A/D bits enabled.
3482 * The is_access_allowed() check above handles the common case
3483 * of the fault being spurious, and the SPTE is known to be
3484 * shadow-present, i.e. except for access tracking restoration
3485 * making the new SPTE writable, the check is wasteful.
3486 */
3487 if (fault->write && is_mmu_writable_spte(spte)) {
3488 new_spte |= PT_WRITABLE_MASK;
3489
3490 /*
3491 * Do not fix write-permission on the large spte when
3492 * dirty logging is enabled. Since we only dirty the
3493 * first page into the dirty-bitmap in
3494 * fast_pf_fix_direct_spte(), other pages are missed
3495 * if its slot has dirty logging enabled.
3496 *
3497 * Instead, we let the slow page fault path create a
3498 * normal spte to fix the access.
3499 */
3500 if (sp->role.level > PG_LEVEL_4K &&
3501 kvm_slot_dirty_track_enabled(fault->slot))
3502 break;
3503 }
3504
3505 /* Verify that the fault can be handled in the fast path */
3506 if (new_spte == spte ||
3507 !is_access_allowed(fault, new_spte))
3508 break;
3509
3510 /*
3511 * Currently, fast page fault only works for direct mapping
3512 * since the gfn is not stable for indirect shadow page. See
3513 * Documentation/virt/kvm/locking.rst to get more detail.
3514 */
3515 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3516 ret = RET_PF_FIXED;
3517 break;
3518 }
3519
3520 if (++retry_count > 4) {
3521 pr_warn_once("Fast #PF retrying more than 4 times.\n");
3522 break;
3523 }
3524
3525 } while (true);
3526
3527 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3528 walk_shadow_page_lockless_end(vcpu);
3529
3530 if (ret != RET_PF_INVALID)
3531 vcpu->stat.pf_fast++;
3532
3533 return ret;
3534 }
3535
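/*
 * Drop the reference a root holds on its shadow page.  TDP MMU roots are
 * put under mmu_lock held for read; shadow MMU roots are refcounted under
 * mmu_lock held for write and queued for zapping once the last reference
 * to an invalid root is dropped.  The root HPA is then marked invalid.
 */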
3536 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3537 struct list_head *invalid_list)
3538 {
3539 struct kvm_mmu_page *sp;
3540
3541 if (!VALID_PAGE(*root_hpa))
3542 return;
3543
3544 sp = root_to_sp(*root_hpa);
3545 if (WARN_ON_ONCE(!sp))
3546 return;
3547
3548 if (is_tdp_mmu_page(sp)) {
3549 lockdep_assert_held_read(&kvm->mmu_lock);
3550 kvm_tdp_mmu_put_root(kvm, sp);
3551 } else {
3552 lockdep_assert_held_write(&kvm->mmu_lock);
3553 if (!--sp->root_count && sp->role.invalid)
3554 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3555 }
3556
3557 *root_hpa = INVALID_PAGE;
3558 }
3559
3560 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3561 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3562 ulong roots_to_free)
3563 {
3564 bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3565 int i;
3566 LIST_HEAD(invalid_list);
3567 bool free_active_root;
3568
3569 WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3570
3571 BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3572
3573 /* Before acquiring the MMU lock, see if we need to do any real work. */
3574 free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3575 && VALID_PAGE(mmu->root.hpa);
3576
3577 if (!free_active_root) {
3578 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3579 if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3580 VALID_PAGE(mmu->prev_roots[i].hpa))
3581 break;
3582
3583 if (i == KVM_MMU_NUM_PREV_ROOTS)
3584 return;
3585 }
3586
3587 if (is_tdp_mmu)
3588 read_lock(&kvm->mmu_lock);
3589 else
3590 write_lock(&kvm->mmu_lock);
3591
3592 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3593 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3594 mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3595 &invalid_list);
3596
3597 if (free_active_root) {
3598 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3599 /* Nothing to cleanup for dummy roots. */
3600 } else if (root_to_sp(mmu->root.hpa)) {
3601 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3602 } else if (mmu->pae_root) {
3603 for (i = 0; i < 4; ++i) {
3604 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3605 continue;
3606
3607 mmu_free_root_page(kvm, &mmu->pae_root[i],
3608 &invalid_list);
3609 mmu->pae_root[i] = INVALID_PAE_ROOT;
3610 }
3611 }
3612 mmu->root.hpa = INVALID_PAGE;
3613 mmu->root.pgd = 0;
3614 }
3615
3616 if (is_tdp_mmu) {
3617 read_unlock(&kvm->mmu_lock);
3618 WARN_ON_ONCE(!list_empty(&invalid_list));
3619 } else {
3620 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3621 write_unlock(&kvm->mmu_lock);
3622 }
3623 }
3624 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3625
3626 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3627 {
3628 unsigned long roots_to_free = 0;
3629 struct kvm_mmu_page *sp;
3630 hpa_t root_hpa;
3631 int i;
3632
3633 /*
3634 * This should not be called while L2 is active, L2 can't invalidate
3635 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3636 */
3637 WARN_ON_ONCE(mmu->root_role.guest_mode);
3638
3639 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3640 root_hpa = mmu->prev_roots[i].hpa;
3641 if (!VALID_PAGE(root_hpa))
3642 continue;
3643
3644 sp = root_to_sp(root_hpa);
3645 if (!sp || sp->role.guest_mode)
3646 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3647 }
3648
3649 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3650 }
3651 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3652
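/*
 * Get (or create) a shadow page to act as a root at the given level and
 * quadrant, take a root reference on it, and return the physical address
 * of its page table.
 */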
3653 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3654 u8 level)
3655 {
3656 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3657 struct kvm_mmu_page *sp;
3658
3659 role.level = level;
3660 role.quadrant = quadrant;
3661
3662 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3663 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3664
3665 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3666 ++sp->root_count;
3667
3668 return __pa(sp->spt);
3669 }
3670
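/*
 * Allocate the root(s) for a direct MMU: the TDP MMU allocates its own
 * root, otherwise a single shadow root is used for 4/5-level paging, or
 * four PAE roots when the shadow page tables use 3-level (PAE) paging.
 */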
3671 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3672 {
3673 struct kvm_mmu *mmu = vcpu->arch.mmu;
3674 u8 shadow_root_level = mmu->root_role.level;
3675 hpa_t root;
3676 unsigned i;
3677 int r;
3678
3679 if (tdp_mmu_enabled)
3680 return kvm_tdp_mmu_alloc_root(vcpu);
3681
3682 write_lock(&vcpu->kvm->mmu_lock);
3683 r = make_mmu_pages_available(vcpu);
3684 if (r < 0)
3685 goto out_unlock;
3686
3687 if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3688 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3689 mmu->root.hpa = root;
3690 } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3691 if (WARN_ON_ONCE(!mmu->pae_root)) {
3692 r = -EIO;
3693 goto out_unlock;
3694 }
3695
3696 for (i = 0; i < 4; ++i) {
3697 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3698
3699 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3700 PT32_ROOT_LEVEL);
3701 mmu->pae_root[i] = root | PT_PRESENT_MASK |
3702 shadow_me_value;
3703 }
3704 mmu->root.hpa = __pa(mmu->pae_root);
3705 } else {
3706 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3707 r = -EIO;
3708 goto out_unlock;
3709 }
3710
3711 /* root.pgd is ignored for direct MMUs. */
3712 mmu->root.pgd = 0;
3713 out_unlock:
3714 write_unlock(&vcpu->kvm->mmu_lock);
3715 return r;
3716 }
3717
3718 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3719 {
3720 struct kvm_memslots *slots;
3721 struct kvm_memory_slot *slot;
3722 int r = 0, i, bkt;
3723
3724 /*
3725 * Check if this is the first shadow root being allocated before
3726 * taking the lock.
3727 */
3728 if (kvm_shadow_root_allocated(kvm))
3729 return 0;
3730
3731 mutex_lock(&kvm->slots_arch_lock);
3732
3733 /* Recheck, under the lock, whether this is the first shadow root. */
3734 if (kvm_shadow_root_allocated(kvm))
3735 goto out_unlock;
3736
3737 /*
3738 * Check if anything actually needs to be allocated, e.g. all metadata
3739 * will be allocated upfront if TDP is disabled.
3740 */
3741 if (kvm_memslots_have_rmaps(kvm) &&
3742 kvm_page_track_write_tracking_enabled(kvm))
3743 goto out_success;
3744
3745 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
3746 slots = __kvm_memslots(kvm, i);
3747 kvm_for_each_memslot(slot, bkt, slots) {
3748 /*
3749 * Both of these functions are no-ops if the target is
3750 * already allocated, so unconditionally calling both
3751 * is safe. Intentionally do NOT free allocations on
3752 * failure to avoid having to track which allocations
3753 * were made now versus when the memslot was created.
3754 * The metadata is guaranteed to be freed when the slot
3755 * is freed, and will be kept/used if userspace retries
3756 * KVM_RUN instead of killing the VM.
3757 */
3758 r = memslot_rmap_alloc(slot, slot->npages);
3759 if (r)
3760 goto out_unlock;
3761 r = kvm_page_track_write_tracking_alloc(slot);
3762 if (r)
3763 goto out_unlock;
3764 }
3765 }
3766
3767 /*
3768 * Ensure that shadow_root_allocated becomes true strictly after
3769 * all the related pointers are set.
3770 */
3771 out_success:
3772 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3773
3774 out_unlock:
3775 mutex_unlock(&kvm->slots_arch_lock);
3776 return r;
3777 }
3778
3779 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3780 {
3781 struct kvm_mmu *mmu = vcpu->arch.mmu;
3782 u64 pdptrs[4], pm_mask;
3783 gfn_t root_gfn, root_pgd;
3784 int quadrant, i, r;
3785 hpa_t root;
3786
3787 root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
3788 root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
3789
3790 if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3791 mmu->root.hpa = kvm_mmu_get_dummy_root();
3792 return 0;
3793 }
3794
3795 /*
3796 * On SVM, reading PDPTRs might access guest memory, which might fault
3797 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
3798 */
3799 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3800 for (i = 0; i < 4; ++i) {
3801 pdptrs[i] = mmu->get_pdptr(vcpu, i);
3802 if (!(pdptrs[i] & PT_PRESENT_MASK))
3803 continue;
3804
3805 if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
3806 pdptrs[i] = 0;
3807 }
3808 }
3809
3810 r = mmu_first_shadow_root_alloc(vcpu->kvm);
3811 if (r)
3812 return r;
3813
3814 write_lock(&vcpu->kvm->mmu_lock);
3815 r = make_mmu_pages_available(vcpu);
3816 if (r < 0)
3817 goto out_unlock;
3818
3819 /*
3820 * Do we shadow a long mode page table? If so we need to
3821 * write-protect the guests page table root.
3822 */
3823 if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
3824 root = mmu_alloc_root(vcpu, root_gfn, 0,
3825 mmu->root_role.level);
3826 mmu->root.hpa = root;
3827 goto set_root_pgd;
3828 }
3829
3830 if (WARN_ON_ONCE(!mmu->pae_root)) {
3831 r = -EIO;
3832 goto out_unlock;
3833 }
3834
3835 /*
3836 * We shadow a 32 bit page table. This may be a legacy 2-level
3837 * or a PAE 3-level page table. In either case we need to be aware that
3838 * the shadow page table may be a PAE or a long mode page table.
3839 */
3840 pm_mask = PT_PRESENT_MASK | shadow_me_value;
3841 if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
3842 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3843
3844 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3845 r = -EIO;
3846 goto out_unlock;
3847 }
3848 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3849
3850 if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
3851 if (WARN_ON_ONCE(!mmu->pml5_root)) {
3852 r = -EIO;
3853 goto out_unlock;
3854 }
3855 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3856 }
3857 }
3858
3859 for (i = 0; i < 4; ++i) {
3860 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3861
3862 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3863 if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3864 mmu->pae_root[i] = INVALID_PAE_ROOT;
3865 continue;
3866 }
3867 root_gfn = pdptrs[i] >> PAGE_SHIFT;
3868 }
3869
3870 /*
3871 * If shadowing 32-bit non-PAE page tables, each PAE page
3872 * directory maps one quarter of the guest's non-PAE page
3873 	 * directory. Otherwise each PAE page directory shadows one guest
3874 	 * PAE page directory, so the quadrant should be 0.
3875 */
3876 quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
3877
3878 root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
3879 mmu->pae_root[i] = root | pm_mask;
3880 }
3881
3882 if (mmu->root_role.level == PT64_ROOT_5LEVEL)
3883 mmu->root.hpa = __pa(mmu->pml5_root);
3884 else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
3885 mmu->root.hpa = __pa(mmu->pml4_root);
3886 else
3887 mmu->root.hpa = __pa(mmu->pae_root);
3888
3889 set_root_pgd:
3890 mmu->root.pgd = root_pgd;
3891 out_unlock:
3892 write_unlock(&vcpu->kvm->mmu_lock);
3893
3894 return r;
3895 }
3896
3897 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3898 {
3899 struct kvm_mmu *mmu = vcpu->arch.mmu;
3900 bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
3901 u64 *pml5_root = NULL;
3902 u64 *pml4_root = NULL;
3903 u64 *pae_root;
3904
3905 /*
3906 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3907 * tables are allocated and initialized at root creation as there is no
3908 * equivalent level in the guest's NPT to shadow. Allocate the tables
3909 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3910 */
3911 if (mmu->root_role.direct ||
3912 mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
3913 mmu->root_role.level < PT64_ROOT_4LEVEL)
3914 return 0;
3915
3916 /*
3917 * NPT, the only paging mode that uses this horror, uses a fixed number
3918 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3919 	 * all MMUs are 5-level. Thus, this can safely require that pml5_root
3920 * is allocated if the other roots are valid and pml5 is needed, as any
3921 * prior MMU would also have required pml5.
3922 */
3923 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3924 return 0;
3925
3926 /*
3927 * The special roots should always be allocated in concert. Yell and
3928 * bail if KVM ends up in a state where only one of the roots is valid.
3929 */
3930 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3931 (need_pml5 && mmu->pml5_root)))
3932 return -EIO;
3933
3934 /*
3935 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3936 * doesn't need to be decrypted.
3937 */
3938 pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3939 if (!pae_root)
3940 return -ENOMEM;
3941
3942 #ifdef CONFIG_X86_64
3943 pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3944 if (!pml4_root)
3945 goto err_pml4;
3946
3947 if (need_pml5) {
3948 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3949 if (!pml5_root)
3950 goto err_pml5;
3951 }
3952 #endif
3953
3954 mmu->pae_root = pae_root;
3955 mmu->pml4_root = pml4_root;
3956 mmu->pml5_root = pml5_root;
3957
3958 return 0;
3959
3960 #ifdef CONFIG_X86_64
3961 err_pml5:
3962 free_page((unsigned long)pml4_root);
3963 err_pml4:
3964 free_page((unsigned long)pae_root);
3965 return -ENOMEM;
3966 #endif
3967 }
3968
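/*
 * A root needs to be synced if its shadow page, or any of its children,
 * has been marked unsync.  Invalid and dummy roots never need syncing.
 */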
3969 static bool is_unsync_root(hpa_t root)
3970 {
3971 struct kvm_mmu_page *sp;
3972
3973 if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
3974 return false;
3975
3976 /*
3977 * The read barrier orders the CPU's read of SPTE.W during the page table
3978 * walk before the reads of sp->unsync/sp->unsync_children here.
3979 *
3980 * Even if another CPU was marking the SP as unsync-ed simultaneously,
3981 * any guest page table changes are not guaranteed to be visible anyway
3982 * until this VCPU issues a TLB flush strictly after those changes are
3983 * made. We only need to ensure that the other CPU sets these flags
3984 * before any actual changes to the page tables are made. The comments
3985 * in mmu_try_to_unsync_pages() describe what could go wrong if this
3986 * requirement isn't satisfied.
3987 */
3988 smp_rmb();
3989 sp = root_to_sp(root);
3990
3991 /*
3992 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
3993 * PDPTEs for a given PAE root need to be synchronized individually.
3994 */
3995 if (WARN_ON_ONCE(!sp))
3996 return false;
3997
3998 if (sp->unsync || sp->unsync_children)
3999 return true;
4000
4001 return false;
4002 }
4003
4004 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
4005 {
4006 int i;
4007 struct kvm_mmu_page *sp;
4008
4009 if (vcpu->arch.mmu->root_role.direct)
4010 return;
4011
4012 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4013 return;
4014
4015 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4016
4017 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4018 hpa_t root = vcpu->arch.mmu->root.hpa;
4019
4020 if (!is_unsync_root(root))
4021 return;
4022
4023 sp = root_to_sp(root);
4024
4025 write_lock(&vcpu->kvm->mmu_lock);
4026 mmu_sync_children(vcpu, sp, true);
4027 write_unlock(&vcpu->kvm->mmu_lock);
4028 return;
4029 }
4030
4031 write_lock(&vcpu->kvm->mmu_lock);
4032
4033 for (i = 0; i < 4; ++i) {
4034 hpa_t root = vcpu->arch.mmu->pae_root[i];
4035
4036 if (IS_VALID_PAE_ROOT(root)) {
4037 sp = spte_to_child_sp(root);
4038 mmu_sync_children(vcpu, sp, true);
4039 }
4040 }
4041
4042 write_unlock(&vcpu->kvm->mmu_lock);
4043 }
4044
4045 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4046 {
4047 unsigned long roots_to_free = 0;
4048 int i;
4049
4050 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4051 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4052 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4053
4054 /* sync prev_roots by simply freeing them */
4055 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4056 }
4057
4058 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4059 gpa_t vaddr, u64 access,
4060 struct x86_exception *exception)
4061 {
4062 if (exception)
4063 exception->error_code = 0;
4064 return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4065 }
4066
4067 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4068 {
4069 /*
4070 * A nested guest cannot use the MMIO cache if it is using nested
4071 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4072 */
4073 if (mmu_is_nested(vcpu))
4074 return false;
4075
4076 if (direct)
4077 return vcpu_match_mmio_gpa(vcpu, addr);
4078
4079 return vcpu_match_mmio_gva(vcpu, addr);
4080 }
4081
4082 /*
4083 * Return the level of the lowest level SPTE added to sptes.
4084 * That SPTE may be non-present.
4085 *
4086 * Must be called between walk_shadow_page_lockless_{begin,end}.
4087 */
4088 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4089 {
4090 struct kvm_shadow_walk_iterator iterator;
4091 int leaf = -1;
4092 u64 spte;
4093
4094 for (shadow_walk_init(&iterator, vcpu, addr),
4095 *root_level = iterator.level;
4096 shadow_walk_okay(&iterator);
4097 __shadow_walk_next(&iterator, spte)) {
4098 leaf = iterator.level;
4099 spte = mmu_spte_get_lockless(iterator.sptep);
4100
4101 sptes[leaf] = spte;
4102 }
4103
4104 return leaf;
4105 }
4106
4107 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4108 int *root_level)
4109 {
4110 int leaf;
4111
4112 walk_shadow_page_lockless_begin(vcpu);
4113
4114 if (is_tdp_mmu_active(vcpu))
4115 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4116 else
4117 leaf = get_walk(vcpu, addr, sptes, root_level);
4118
4119 walk_shadow_page_lockless_end(vcpu);
4120 return leaf;
4121 }
4122
4123 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
4124 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4125 {
4126 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4127 struct rsvd_bits_validate *rsvd_check;
4128 int root, leaf, level;
4129 bool reserved = false;
4130
4131 leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4132 if (unlikely(leaf < 0)) {
4133 *sptep = 0ull;
4134 return reserved;
4135 }
4136
4137 *sptep = sptes[leaf];
4138
4139 /*
4140 * Skip reserved bits checks on the terminal leaf if it's not a valid
4141 * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by
4142 * design, always have reserved bits set. The purpose of the checks is
4143 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
4144 */
4145 if (!is_shadow_present_pte(sptes[leaf]))
4146 leaf++;
4147
4148 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4149
4150 for (level = root; level >= leaf; level--)
4151 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4152
4153 if (reserved) {
4154 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4155 __func__, addr);
4156 for (level = root; level >= leaf; level--)
4157 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4158 sptes[level], level,
4159 get_rsvd_bits(rsvd_check, sptes[level], level));
4160 }
4161
4162 return reserved;
4163 }
4164
4165 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4166 {
4167 u64 spte;
4168 bool reserved;
4169
4170 if (mmio_info_in_cache(vcpu, addr, direct))
4171 return RET_PF_EMULATE;
4172
4173 reserved = get_mmio_spte(vcpu, addr, &spte);
4174 if (WARN_ON_ONCE(reserved))
4175 return -EINVAL;
4176
4177 if (is_mmio_spte(vcpu->kvm, spte)) {
4178 gfn_t gfn = get_mmio_spte_gfn(spte);
4179 unsigned int access = get_mmio_spte_access(spte);
4180
4181 if (!check_mmio_spte(vcpu, spte))
4182 return RET_PF_INVALID;
4183
4184 if (direct)
4185 addr = 0;
4186
4187 trace_handle_mmio_page_fault(addr, gfn, access);
4188 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4189 return RET_PF_EMULATE;
4190 }
4191
4192 /*
4193 	 * If the page table was zapped by another CPU, let the CPU fault again on
4194 * the address.
4195 */
4196 return RET_PF_RETRY;
4197 }
4198
4199 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4200 struct kvm_page_fault *fault)
4201 {
4202 if (unlikely(fault->rsvd))
4203 return false;
4204
4205 if (!fault->present || !fault->write)
4206 return false;
4207
4208 /*
4209 	 * The guest is writing to a page that is write-tracked, which
4210 	 * cannot be fixed by the page fault handler.
4211 */
4212 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4213 return true;
4214
4215 return false;
4216 }
4217
4218 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4219 {
4220 struct kvm_shadow_walk_iterator iterator;
4221 u64 spte;
4222
4223 walk_shadow_page_lockless_begin(vcpu);
4224 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4225 clear_sp_write_flooding_count(iterator.sptep);
4226 walk_shadow_page_lockless_end(vcpu);
4227 }
4228
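/*
 * Async #PF tokens pack the per-vCPU APF id into the upper bits and the
 * vcpu_id into the low 12 bits; the id is reset to 1 whenever shifting it
 * left by 12 would yield zero, guaranteeing a non-zero token.
 */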
4229 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4230 {
4231 /* make sure the token value is not 0 */
4232 u32 id = vcpu->arch.apf.id;
4233
4234 if (id << 12 == 0)
4235 vcpu->arch.apf.id = 1;
4236
4237 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4238 }
4239
4240 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4241 struct kvm_page_fault *fault)
4242 {
4243 struct kvm_arch_async_pf arch;
4244
4245 arch.token = alloc_apf_token(vcpu);
4246 arch.gfn = fault->gfn;
4247 arch.error_code = fault->error_code;
4248 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4249 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4250
4251 return kvm_setup_async_pf(vcpu, fault->addr,
4252 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4253 }
4254
4255 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4256 {
4257 int r;
4258
4259 if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4260 return;
4261
4262 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4263 work->wakeup_all)
4264 return;
4265
4266 r = kvm_mmu_reload(vcpu);
4267 if (unlikely(r))
4268 return;
4269
4270 if (!vcpu->arch.mmu->root_role.direct &&
4271 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4272 return;
4273
4274 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
4275 }
4276
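/*
 * Convert a page allocation order (e.g. from guest_memfd) to the largest
 * mapping level that order can back: 4K, 2M or 1G.
 */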
4277 static inline u8 kvm_max_level_for_order(int order)
4278 {
4279 BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
4280
4281 KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
4282 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
4283 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
4284
4285 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
4286 return PG_LEVEL_1G;
4287
4288 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4289 return PG_LEVEL_2M;
4290
4291 return PG_LEVEL_4K;
4292 }
4293
4294 static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
4295 struct kvm_page_fault *fault)
4296 {
4297 int max_order, r;
4298
4299 if (!kvm_slot_can_be_private(fault->slot)) {
4300 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4301 return -EFAULT;
4302 }
4303
4304 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4305 &max_order);
4306 if (r) {
4307 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4308 return r;
4309 }
4310
4311 fault->max_level = min(kvm_max_level_for_order(max_order),
4312 fault->max_level);
4313 fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4314
4315 return RET_PF_CONTINUE;
4316 }
4317
4318 static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4319 {
4320 bool async;
4321
4322 if (fault->is_private)
4323 return kvm_faultin_pfn_private(vcpu, fault);
4324
4325 async = false;
4326 fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, false,
4327 &async, fault->write,
4328 &fault->map_writable, &fault->hva);
4329 if (!async)
4330 return RET_PF_CONTINUE; /* *pfn has correct page already */
4331
4332 if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4333 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4334 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4335 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4336 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4337 return RET_PF_RETRY;
4338 } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4339 return RET_PF_RETRY;
4340 }
4341 }
4342
4343 /*
4344 * Allow gup to bail on pending non-fatal signals when it's also allowed
4345 * to wait for IO. Note, gup always bails if it is unable to quickly
4346 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4347 */
4348 fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
4349 NULL, fault->write,
4350 &fault->map_writable, &fault->hva);
4351 return RET_PF_CONTINUE;
4352 }
4353
4354 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
4355 unsigned int access)
4356 {
4357 struct kvm_memory_slot *slot = fault->slot;
4358 int ret;
4359
4360 /*
4361 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4362 * change in attributes. is_page_fault_stale() will detect an
4363 	 * invalidation related to fault->gfn and resume the guest without
4364 * installing a mapping in the page tables.
4365 */
4366 fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4367 smp_rmb();
4368
4369 /*
4370 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4371 * private vs. shared mismatch.
4372 */
4373 if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
4374 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4375 return -EFAULT;
4376 }
4377
4378 if (unlikely(!slot))
4379 return kvm_handle_noslot_fault(vcpu, fault, access);
4380
4381 /*
4382 * Retry the page fault if the gfn hit a memslot that is being deleted
4383 * or moved. This ensures any existing SPTEs for the old memslot will
4384 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4385 */
4386 if (slot->flags & KVM_MEMSLOT_INVALID)
4387 return RET_PF_RETRY;
4388
4389 if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4390 /*
4391 * Don't map L1's APIC access page into L2, KVM doesn't support
4392 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4393 * i.e. the access needs to be emulated. Emulating access to
4394 * L1's APIC is also correct if L1 is accelerating L2's own
4395 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4396 * into L2. Note, vcpu_is_mmio_gpa() always treats access to
4397 * the APIC as MMIO. Allow an MMIO SPTE to be created, as KVM
4398 * uses different roots for L1 vs. L2, i.e. there is no danger
4399 * of breaking APICv/AVIC for L1.
4400 */
4401 if (is_guest_mode(vcpu))
4402 return kvm_handle_noslot_fault(vcpu, fault, access);
4403
4404 /*
4405 * If the APIC access page exists but is disabled, go directly
4406 		 * to emulation without caching the MMIO access or creating an
4407 * MMIO SPTE. That way the cache doesn't need to be purged
4408 * when the AVIC is re-enabled.
4409 */
4410 if (!kvm_apicv_activated(vcpu->kvm))
4411 return RET_PF_EMULATE;
4412 }
4413
4414 /*
4415 * Check for a relevant mmu_notifier invalidation event before getting
4416 * the pfn from the primary MMU, and before acquiring mmu_lock.
4417 *
4418 * For mmu_lock, if there is an in-progress invalidation and the kernel
4419 * allows preemption, the invalidation task may drop mmu_lock and yield
4420 * in response to mmu_lock being contended, which is *very* counter-
4421 * productive as this vCPU can't actually make forward progress until
4422 * the invalidation completes.
4423 *
4424 	 * Retrying now can also avoid unnecessary lock contention in the primary
4425 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4426 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4427 * can cause the invalidation to take longer by holding locks that are
4428 * needed to complete the invalidation.
4429 *
4430 	 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
4431 * will never yield mmu_lock in response to contention, as this vCPU is
4432 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4433 * to detect retry guarantees the worst case latency for the vCPU.
4434 */
4435 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
4436 return RET_PF_RETRY;
4437
4438 ret = __kvm_faultin_pfn(vcpu, fault);
4439 if (ret != RET_PF_CONTINUE)
4440 return ret;
4441
4442 if (unlikely(is_error_pfn(fault->pfn)))
4443 return kvm_handle_error_pfn(vcpu, fault);
4444
4445 if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4446 return kvm_handle_noslot_fault(vcpu, fault, access);
4447
4448 /*
4449 * Check again for a relevant mmu_notifier invalidation event purely to
4450 * avoid contending mmu_lock. Most invalidations will be detected by
4451 * the previous check, but checking is extremely cheap relative to the
4452 * overall cost of failing to detect the invalidation until after
4453 * mmu_lock is acquired.
4454 */
4455 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
4456 kvm_release_pfn_clean(fault->pfn);
4457 return RET_PF_RETRY;
4458 }
4459
4460 return RET_PF_CONTINUE;
4461 }
4462
4463 /*
4464 * Returns true if the page fault is stale and needs to be retried, i.e. if the
4465 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4466 */
4467 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4468 struct kvm_page_fault *fault)
4469 {
4470 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4471
4472 /* Special roots, e.g. pae_root, are not backed by shadow pages. */
4473 if (sp && is_obsolete_sp(vcpu->kvm, sp))
4474 return true;
4475
4476 /*
4477 * Roots without an associated shadow page are considered invalid if
4478 * there is a pending request to free obsolete roots. The request is
4479 * only a hint that the current root _may_ be obsolete and needs to be
4480 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4481 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4482 * to reload even if no vCPU is actively using the root.
4483 */
4484 if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4485 return true;
4486
4487 /*
4488 * Check for a relevant mmu_notifier invalidation event one last time
4489 * now that mmu_lock is held, as the "unsafe" checks performed without
4490 * holding mmu_lock can get false negatives.
4491 */
4492 return fault->slot &&
4493 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4494 }
4495
4496 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4497 {
4498 int r;
4499
4500 /* Dummy roots are used only for shadowing bad guest roots. */
4501 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4502 return RET_PF_RETRY;
4503
4504 if (page_fault_handle_page_track(vcpu, fault))
4505 return RET_PF_EMULATE;
4506
4507 r = fast_page_fault(vcpu, fault);
4508 if (r != RET_PF_INVALID)
4509 return r;
4510
4511 r = mmu_topup_memory_caches(vcpu, false);
4512 if (r)
4513 return r;
4514
4515 r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
4516 if (r != RET_PF_CONTINUE)
4517 return r;
4518
4519 r = RET_PF_RETRY;
4520 write_lock(&vcpu->kvm->mmu_lock);
4521
4522 if (is_page_fault_stale(vcpu, fault))
4523 goto out_unlock;
4524
4525 r = make_mmu_pages_available(vcpu);
4526 if (r)
4527 goto out_unlock;
4528
4529 r = direct_map(vcpu, fault);
4530
4531 out_unlock:
4532 write_unlock(&vcpu->kvm->mmu_lock);
4533 kvm_release_pfn_clean(fault->pfn);
4534 return r;
4535 }
4536
4537 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4538 struct kvm_page_fault *fault)
4539 {
4540 	/* This path builds a PAE pagetable, so we can map 2MB pages at most. */
4541 fault->max_level = PG_LEVEL_2M;
4542 return direct_page_fault(vcpu, fault);
4543 }
4544
4545 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4546 u64 fault_address, char *insn, int insn_len)
4547 {
4548 int r = 1;
4549 u32 flags = vcpu->arch.apf.host_apf_flags;
4550
4551 #ifndef CONFIG_X86_64
4552 /* A 64-bit CR2 should be impossible on 32-bit KVM. */
4553 if (WARN_ON_ONCE(fault_address >> 32))
4554 return -EFAULT;
4555 #endif
4556 /*
4557 	 * Legacy #PF exceptions only have a 32-bit error code.  Simply drop the
4558 * upper bits as KVM doesn't use them for #PF (because they are never
4559 * set), and to ensure there are no collisions with KVM-defined bits.
4560 */
4561 if (WARN_ON_ONCE(error_code >> 32))
4562 error_code = lower_32_bits(error_code);
4563
4564 /* Ensure the above sanity check also covers KVM-defined flags. */
4565 BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4566
4567 vcpu->arch.l1tf_flush_l1d = true;
4568 if (!flags) {
4569 trace_kvm_page_fault(vcpu, fault_address, error_code);
4570
4571 if (kvm_event_needs_reinjection(vcpu))
4572 kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4573 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4574 insn_len);
4575 } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4576 vcpu->arch.apf.host_apf_flags = 0;
4577 local_irq_disable();
4578 kvm_async_pf_task_wait_schedule(fault_address);
4579 local_irq_enable();
4580 } else {
4581 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4582 }
4583
4584 return r;
4585 }
4586 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4587
4588 #ifdef CONFIG_X86_64
4589 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4590 struct kvm_page_fault *fault)
4591 {
4592 int r;
4593
4594 if (page_fault_handle_page_track(vcpu, fault))
4595 return RET_PF_EMULATE;
4596
4597 r = fast_page_fault(vcpu, fault);
4598 if (r != RET_PF_INVALID)
4599 return r;
4600
4601 r = mmu_topup_memory_caches(vcpu, false);
4602 if (r)
4603 return r;
4604
4605 r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
4606 if (r != RET_PF_CONTINUE)
4607 return r;
4608
4609 r = RET_PF_RETRY;
4610 read_lock(&vcpu->kvm->mmu_lock);
4611
4612 if (is_page_fault_stale(vcpu, fault))
4613 goto out_unlock;
4614
4615 r = kvm_tdp_mmu_map(vcpu, fault);
4616
4617 out_unlock:
4618 read_unlock(&vcpu->kvm->mmu_lock);
4619 kvm_release_pfn_clean(fault->pfn);
4620 return r;
4621 }
4622 #endif
4623
4624 bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
4625 {
4626 /*
4627 * If host MTRRs are ignored (shadow_memtype_mask is non-zero), and the
4628 * VM has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is
4629 * to honor the memtype from the guest's MTRRs so that guest accesses
4630 * to memory that is DMA'd aren't cached against the guest's wishes.
4631 *
4632 * Note, KVM may still ultimately ignore guest MTRRs for certain PFNs,
4633 * e.g. KVM will force UC memtype for host MMIO.
4634 */
4635 return vm_has_noncoherent_dma && shadow_memtype_mask;
4636 }
4637
4638 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4639 {
4640 /*
4641 * If the guest's MTRRs may be used to compute the "real" memtype,
4642 * restrict the mapping level to ensure KVM uses a consistent memtype
4643 * across the entire mapping.
4644 */
4645 if (kvm_mmu_honors_guest_mtrrs(vcpu->kvm)) {
4646 for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) {
4647 int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
4648 gfn_t base = gfn_round_for_level(fault->gfn,
4649 fault->max_level);
4650
4651 if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4652 break;
4653 }
4654 }
4655
4656 #ifdef CONFIG_X86_64
4657 if (tdp_mmu_enabled)
4658 return kvm_tdp_mmu_page_fault(vcpu, fault);
4659 #endif
4660
4661 return direct_page_fault(vcpu, fault);
4662 }
4663
4664 static void nonpaging_init_context(struct kvm_mmu *context)
4665 {
4666 context->page_fault = nonpaging_page_fault;
4667 context->gva_to_gpa = nonpaging_gva_to_gpa;
4668 context->sync_spte = NULL;
4669 }
4670
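/*
 * A cached root is usable for the new pgd/role iff the root is valid, its
 * pgd matches (ignored for direct roots), and its shadow page's role is an
 * exact match for the new role.
 */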
4671 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4672 union kvm_mmu_page_role role)
4673 {
4674 struct kvm_mmu_page *sp;
4675
4676 if (!VALID_PAGE(root->hpa))
4677 return false;
4678
4679 if (!role.direct && pgd != root->pgd)
4680 return false;
4681
4682 sp = root_to_sp(root->hpa);
4683 if (WARN_ON_ONCE(!sp))
4684 return false;
4685
4686 return role.word == sp->role.word;
4687 }
4688
4689 /*
4690 * Find out if a previously cached root matching the new pgd/role is available,
4691 * and insert the current root as the MRU in the cache.
4692 * If a matching root is found, it is assigned to kvm_mmu->root and
4693 * true is returned.
4694 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4695 * evicted to make room for the current root, and false is returned.
4696 */
4697 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
4698 gpa_t new_pgd,
4699 union kvm_mmu_page_role new_role)
4700 {
4701 uint i;
4702
4703 if (is_root_usable(&mmu->root, new_pgd, new_role))
4704 return true;
4705
4706 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4707 /*
4708 * The swaps end up rotating the cache like this:
4709 * C 0 1 2 3 (on entry to the function)
4710 * 0 C 1 2 3
4711 * 1 C 0 2 3
4712 * 2 C 0 1 3
4713 * 3 C 0 1 2 (on exit from the loop)
4714 */
4715 swap(mmu->root, mmu->prev_roots[i]);
4716 if (is_root_usable(&mmu->root, new_pgd, new_role))
4717 return true;
4718 }
4719
4720 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4721 return false;
4722 }
4723
4724 /*
4725 * Find out if a previously cached root matching the new pgd/role is available.
4726 * On entry, mmu->root is invalid.
4727 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4728 * of the cache becomes invalid, and true is returned.
4729 * If no match is found, kvm_mmu->root is left invalid and false is returned.
4730 */
4731 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
4732 gpa_t new_pgd,
4733 union kvm_mmu_page_role new_role)
4734 {
4735 uint i;
4736
4737 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4738 if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
4739 goto hit;
4740
4741 return false;
4742
4743 hit:
4744 swap(mmu->root, mmu->prev_roots[i]);
4745 /* Bubble up the remaining roots. */
4746 for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
4747 mmu->prev_roots[i] = mmu->prev_roots[i + 1];
4748 mmu->prev_roots[i].hpa = INVALID_PAGE;
4749 return true;
4750 }
4751
4752 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
4753 gpa_t new_pgd, union kvm_mmu_page_role new_role)
4754 {
4755 /*
4756 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
4757 * avoid having to deal with PDPTEs and other complexities.
4758 */
4759 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4760 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4761
4762 if (VALID_PAGE(mmu->root.hpa))
4763 return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
4764 else
4765 return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
4766 }
4767
4768 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4769 {
4770 struct kvm_mmu *mmu = vcpu->arch.mmu;
4771 union kvm_mmu_page_role new_role = mmu->root_role;
4772
4773 /*
4774 * Return immediately if no usable root was found, kvm_mmu_reload()
4775 * will establish a valid root prior to the next VM-Enter.
4776 */
4777 if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
4778 return;
4779
4780 /*
4781 * It's possible that the cached previous root page is obsolete because
4782 * of a change in the MMU generation number. However, changing the
4783 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
4784 * which will free the root set here and allocate a new one.
4785 */
4786 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4787
4788 if (force_flush_and_sync_on_reuse) {
4789 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4790 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4791 }
4792
4793 /*
4794 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4795 * switching to a new CR3, that GVA->GPA mapping may no longer be
4796 * valid. So clear any cached MMIO info even when we don't need to sync
4797 * the shadow page tables.
4798 */
4799 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4800
4801 /*
4802 * If this is a direct root page, it doesn't have a write flooding
4803 * count. Otherwise, clear the write flooding count.
4804 */
4805 if (!new_role.direct) {
4806 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4807
4808 if (!WARN_ON_ONCE(!sp))
4809 __clear_sp_write_flooding_count(sp);
4810 }
4811 }
4812 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4813
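/*
 * Refresh an MMIO SPTE during sync: drop it if the gfn no longer matches,
 * otherwise rewrite it via mark_mmio_spte().  Returns true if the SPTE was
 * an MMIO SPTE, i.e. if it was handled here.
 */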
4814 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4815 unsigned int access)
4816 {
4817 if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
4818 if (gfn != get_mmio_spte_gfn(*sptep)) {
4819 mmu_spte_clear_no_track(sptep);
4820 return true;
4821 }
4822
4823 mark_mmio_spte(vcpu, sptep, gfn, access);
4824 return true;
4825 }
4826
4827 return false;
4828 }
4829
4830 #define PTTYPE_EPT 18 /* arbitrary */
4831 #define PTTYPE PTTYPE_EPT
4832 #include "paging_tmpl.h"
4833 #undef PTTYPE
4834
4835 #define PTTYPE 64
4836 #include "paging_tmpl.h"
4837 #undef PTTYPE
4838
4839 #define PTTYPE 32
4840 #include "paging_tmpl.h"
4841 #undef PTTYPE
4842
4843 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4844 u64 pa_bits_rsvd, int level, bool nx,
4845 bool gbpages, bool pse, bool amd)
4846 {
4847 u64 gbpages_bit_rsvd = 0;
4848 u64 nonleaf_bit8_rsvd = 0;
4849 u64 high_bits_rsvd;
4850
4851 rsvd_check->bad_mt_xwr = 0;
4852
4853 if (!gbpages)
4854 gbpages_bit_rsvd = rsvd_bits(7, 7);
4855
4856 if (level == PT32E_ROOT_LEVEL)
4857 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4858 else
4859 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4860
4861 /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4862 if (!nx)
4863 high_bits_rsvd |= rsvd_bits(63, 63);
4864
4865 /*
4866 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4867 * leaf entries) on AMD CPUs only.
4868 */
4869 if (amd)
4870 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4871
4872 switch (level) {
4873 case PT32_ROOT_LEVEL:
4874 /* no rsvd bits for 2 level 4K page table entries */
4875 rsvd_check->rsvd_bits_mask[0][1] = 0;
4876 rsvd_check->rsvd_bits_mask[0][0] = 0;
4877 rsvd_check->rsvd_bits_mask[1][0] =
4878 rsvd_check->rsvd_bits_mask[0][0];
4879
4880 if (!pse) {
4881 rsvd_check->rsvd_bits_mask[1][1] = 0;
4882 break;
4883 }
4884
4885 if (is_cpuid_PSE36())
4886 /* 36bits PSE 4MB page */
4887 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4888 else
4889 /* 32 bits PSE 4MB page */
4890 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4891 break;
4892 case PT32E_ROOT_LEVEL:
4893 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4894 high_bits_rsvd |
4895 rsvd_bits(5, 8) |
4896 rsvd_bits(1, 2); /* PDPTE */
4897 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */
4898 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */
4899 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4900 rsvd_bits(13, 20); /* large page */
4901 rsvd_check->rsvd_bits_mask[1][0] =
4902 rsvd_check->rsvd_bits_mask[0][0];
4903 break;
4904 case PT64_ROOT_5LEVEL:
4905 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4906 nonleaf_bit8_rsvd |
4907 rsvd_bits(7, 7);
4908 rsvd_check->rsvd_bits_mask[1][4] =
4909 rsvd_check->rsvd_bits_mask[0][4];
4910 fallthrough;
4911 case PT64_ROOT_4LEVEL:
4912 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4913 nonleaf_bit8_rsvd |
4914 rsvd_bits(7, 7);
4915 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4916 gbpages_bit_rsvd;
4917 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4918 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4919 rsvd_check->rsvd_bits_mask[1][3] =
4920 rsvd_check->rsvd_bits_mask[0][3];
4921 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4922 gbpages_bit_rsvd |
4923 rsvd_bits(13, 29);
4924 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4925 rsvd_bits(13, 20); /* large page */
4926 rsvd_check->rsvd_bits_mask[1][0] =
4927 rsvd_check->rsvd_bits_mask[0][0];
4928 break;
4929 }
4930 }
4931
4932 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4933 struct kvm_mmu *context)
4934 {
4935 __reset_rsvds_bits_mask(&context->guest_rsvd_check,
4936 vcpu->arch.reserved_gpa_bits,
4937 context->cpu_role.base.level, is_efer_nx(context),
4938 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
4939 is_cr4_pse(context),
4940 guest_cpuid_is_amd_compatible(vcpu));
4941 }
4942
4943 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4944 u64 pa_bits_rsvd, bool execonly,
4945 int huge_page_level)
4946 {
4947 u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4948 u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
4949 u64 bad_mt_xwr;
4950
4951 if (huge_page_level < PG_LEVEL_1G)
4952 large_1g_rsvd = rsvd_bits(7, 7);
4953 if (huge_page_level < PG_LEVEL_2M)
4954 large_2m_rsvd = rsvd_bits(7, 7);
4955
4956 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4957 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4958 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
4959 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
4960 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4961
4962 /* large page */
4963 rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4964 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4965 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
4966 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
4967 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4968
4969 bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
4970 bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
4971 bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
4972 bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
4973 bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
4974 if (!execonly) {
4975 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4976 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4977 }
4978 rsvd_check->bad_mt_xwr = bad_mt_xwr;
4979 }
4980
4981 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4982 struct kvm_mmu *context, bool execonly, int huge_page_level)
4983 {
4984 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4985 vcpu->arch.reserved_gpa_bits, execonly,
4986 huge_page_level);
4987 }
4988
4989 static inline u64 reserved_hpa_bits(void)
4990 {
4991 return rsvd_bits(shadow_phys_bits, 63);
4992 }
4993
4994 /*
4995  * The page table on the host is the shadow page table for the page
4996  * table in the guest or an AMD nested guest, so its MMU features
4997  * completely follow the features in the guest.
4998 */
4999 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5000 struct kvm_mmu *context)
5001 {
5002 /* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
5003 bool is_amd = true;
5004 /* KVM doesn't use 2-level page tables for the shadow MMU. */
5005 bool is_pse = false;
5006 struct rsvd_bits_validate *shadow_zero_check;
5007 int i;
5008
5009 WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5010
5011 shadow_zero_check = &context->shadow_zero_check;
5012 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5013 context->root_role.level,
5014 context->root_role.efer_nx,
5015 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
5016 is_pse, is_amd);
5017
5018 if (!shadow_me_mask)
5019 return;
5020
5021 for (i = context->root_role.level; --i >= 0;) {
5022 /*
5023 * So far shadow_me_value is a constant during KVM's lifetime.
5024 * Bits in shadow_me_value are allowed to be set.
5025 * Bits in shadow_me_mask but not in shadow_me_value are
5026 * not allowed to be set.
5027 */
5028 shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5029 shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5030 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5031 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5032 }
5033
5034 }
5035
5036 static inline bool boot_cpu_is_amd(void)
5037 {
5038 WARN_ON_ONCE(!tdp_enabled);
5039 return shadow_x_mask == 0;
5040 }
5041
5042 /*
5043 * The direct page table on the host uses as many MMU features as
5044 * possible; however, KVM currently does not do execution-protection.
5045 */
5046 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5047 {
5048 struct rsvd_bits_validate *shadow_zero_check;
5049 int i;
5050
5051 shadow_zero_check = &context->shadow_zero_check;
5052
5053 if (boot_cpu_is_amd())
5054 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5055 context->root_role.level, true,
5056 boot_cpu_has(X86_FEATURE_GBPAGES),
5057 false, true);
5058 else
5059 __reset_rsvds_bits_mask_ept(shadow_zero_check,
5060 reserved_hpa_bits(), false,
5061 max_huge_page_level);
5062
5063 if (!shadow_me_mask)
5064 return;
5065
5066 for (i = context->root_role.level; --i >= 0;) {
5067 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5068 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5069 }
5070 }
5071
5072 /*
5073 * Same as the comments in reset_shadow_zero_bits_mask(), except this
5074 * is the shadow page table for an Intel nested guest (EPT).
5075 */
5076 static void
5077 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5078 {
5079 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5080 reserved_hpa_bits(), execonly,
5081 max_huge_page_level);
5082 }
5083
5084 #define BYTE_MASK(access) \
5085 ((1 & (access) ? 2 : 0) | \
5086 (2 & (access) ? 4 : 0) | \
5087 (3 & (access) ? 8 : 0) | \
5088 (4 & (access) ? 16 : 0) | \
5089 (5 & (access) ? 32 : 0) | \
5090 (6 & (access) ? 64 : 0) | \
5091 (7 & (access) ? 128 : 0))
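/*
 * Worked expansion (illustrative, assuming the usual KVM access bits
 * ACC_EXEC_MASK == 1, ACC_WRITE_MASK == 2 and ACC_USER_MASK == 4):
 * BYTE_MASK(access) sets bit i (for i = 1..7) whenever combination i of
 * user/write/exec includes @access, so
 *
 *	x = BYTE_MASK(ACC_EXEC_MASK)  == 0xaa	(bits 1, 3, 5, 7)
 *	w = BYTE_MASK(ACC_WRITE_MASK) == 0xcc	(bits 2, 3, 6, 7)
 *	u = BYTE_MASK(ACC_USER_MASK)  == 0xf0	(bits 4, 5, 6, 7)
 *
 * i.e. each byte is a bitmap over the eight possible pte_access values.
 */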
5092
5093
5094 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5095 {
5096 unsigned byte;
5097
5098 const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5099 const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5100 const u8 u = BYTE_MASK(ACC_USER_MASK);
5101
5102 bool cr4_smep = is_cr4_smep(mmu);
5103 bool cr4_smap = is_cr4_smap(mmu);
5104 bool cr0_wp = is_cr0_wp(mmu);
5105 bool efer_nx = is_efer_nx(mmu);
5106
5107 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5108 unsigned pfec = byte << 1;
5109
5110 /*
5111 * Each "*f" variable has a 1 bit for each UWX value
5112 * that causes a fault with the given PFEC.
5113 */
5114
5115 /* Faults from writes to non-writable pages */
5116 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5117 /* Faults from user mode accesses to supervisor pages */
5118 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5119 /* Faults from fetches of non-executable pages */
5120 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5121 /* Faults from kernel mode fetches of user pages */
5122 u8 smepf = 0;
5123 /* Faults from kernel mode accesses of user pages */
5124 u8 smapf = 0;
5125
5126 if (!ept) {
5127 /* Faults from kernel mode accesses to user pages */
5128 u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5129
5130 /* Not really needed: !nx will cause pte.nx to fault */
5131 if (!efer_nx)
5132 ff = 0;
5133
5134 /* Allow supervisor writes if !cr0.wp */
5135 if (!cr0_wp)
5136 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5137
5138 /* Disallow supervisor fetches of user code if cr4.smep */
5139 if (cr4_smep)
5140 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5141
5142 /*
5143 * SMAP: kernel-mode data accesses from user-mode
5144 * mappings should fault. A fault is considered
5145 * a SMAP violation if all of the following
5146 * conditions are true:
5147 * - X86_CR4_SMAP is set in CR4
5148 * - A user page is accessed
5149 * - The access is not a fetch
5150 * - The access is supervisor mode
5151 * - If implicit supervisor access or X86_EFLAGS_AC is clear
5152 *
5153 * Here, we cover the first four conditions.
5154 * The fifth is computed dynamically in permission_fault();
5155 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5156 * *not* subject to SMAP restrictions.
5157 */
5158 if (cr4_smap)
5159 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5160 }
5161
5162 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5163 }
5164 }
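/*
 * Sketch of the consumer side (assumed shape, see permission_fault() in
 * mmu.h): the error code, shifted right by one to drop the Present bit,
 * selects a byte, and the pte's user/write/exec access bits select a bit
 * within it:
 *
 *	fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 *
 * A set bit means the access faults for that PFEC/pte_access combination.
 */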
5165
5166 /*
5167 * PKU is an additional mechanism by which the paging controls access to
5168 * user-mode addresses based on the value in the PKRU register. Protection
5169 * key violations are reported through a bit in the page fault error code.
5170 * Unlike other bits of the error code, the PK bit is not known at the
5171 * call site of e.g. gva_to_gpa; it must be computed directly in
5172 * permission_fault based on two bits of PKRU, on some machine state (CR4,
5173 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5174 *
5175 * In particular the following conditions come from the error code, the
5176 * page tables and the machine state:
5177 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5178 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5179 * - PK is always zero if U=0 in the page tables
5180 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5181 *
5182 * The PKRU bitmask caches the result of these four conditions. The error
5183 * code (minus the P bit) and the page table's U bit form an index into the
5184 * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed
5185 * with the two bits of the PKRU register corresponding to the protection key.
5186 * For the first three conditions above the bits will be 00, thus masking
5187 * away both AD and WD. For all reads, or if the last condition holds, only
5188 * WD will be masked away.
5189 */
5190 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5191 {
5192 unsigned bit;
5193 bool wp;
5194
5195 mmu->pkru_mask = 0;
5196
5197 if (!is_cr4_pke(mmu))
5198 return;
5199
5200 wp = is_cr0_wp(mmu);
5201
5202 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5203 unsigned pfec, pkey_bits;
5204 bool check_pkey, check_write, ff, uf, wf, pte_user;
5205
5206 pfec = bit << 1;
5207 ff = pfec & PFERR_FETCH_MASK;
5208 uf = pfec & PFERR_USER_MASK;
5209 wf = pfec & PFERR_WRITE_MASK;
5210
5211 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
5212 pte_user = pfec & PFERR_RSVD_MASK;
5213
5214 /*
5215 * Only need to check the access which is not an
5216 * instruction fetch and is to a user page.
5217 */
5218 check_pkey = (!ff && pte_user);
5219 /*
5220 * write access is controlled by PKRU if it is a
5221 * user access or CR0.WP = 1.
5222 */
5223 check_write = check_pkey && wf && (uf || wp);
5224
5225 /* PKRU.AD stops both read and write access. */
5226 pkey_bits = !!check_pkey;
5227 /* PKRU.WD stops write access. */
5228 pkey_bits |= (!!check_write) << 1;
5229
5230 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5231 }
5232 }
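/*
 * Worked example (illustrative): a supervisor write (PFEC.W=1, PFEC.U=0,
 * PFEC.F=0) to a user-mode page with CR0.WP=1 yields check_pkey=1 and
 * check_write=1, so pkey_bits=3 and both PKRU.AD and PKRU.WD are consulted
 * for that index.  The same access with CR0.WP=0 leaves check_write=0, so
 * only PKRU.AD can deny it.
 */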
5233
5234 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5235 struct kvm_mmu *mmu)
5236 {
5237 if (!is_cr0_pg(mmu))
5238 return;
5239
5240 reset_guest_rsvds_bits_mask(vcpu, mmu);
5241 update_permission_bitmask(mmu, false);
5242 update_pkru_bitmask(mmu);
5243 }
5244
5245 static void paging64_init_context(struct kvm_mmu *context)
5246 {
5247 context->page_fault = paging64_page_fault;
5248 context->gva_to_gpa = paging64_gva_to_gpa;
5249 context->sync_spte = paging64_sync_spte;
5250 }
5251
5252 static void paging32_init_context(struct kvm_mmu *context)
5253 {
5254 context->page_fault = paging32_page_fault;
5255 context->gva_to_gpa = paging32_gva_to_gpa;
5256 context->sync_spte = paging32_sync_spte;
5257 }
5258
5259 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5260 const struct kvm_mmu_role_regs *regs)
5261 {
5262 union kvm_cpu_role role = {0};
5263
5264 role.base.access = ACC_ALL;
5265 role.base.smm = is_smm(vcpu);
5266 role.base.guest_mode = is_guest_mode(vcpu);
5267 role.ext.valid = 1;
5268
5269 if (!____is_cr0_pg(regs)) {
5270 role.base.direct = 1;
5271 return role;
5272 }
5273
5274 role.base.efer_nx = ____is_efer_nx(regs);
5275 role.base.cr0_wp = ____is_cr0_wp(regs);
5276 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5277 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5278 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5279
5280 if (____is_efer_lma(regs))
5281 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5282 : PT64_ROOT_4LEVEL;
5283 else if (____is_cr4_pae(regs))
5284 role.base.level = PT32E_ROOT_LEVEL;
5285 else
5286 role.base.level = PT32_ROOT_LEVEL;
5287
5288 role.ext.cr4_smep = ____is_cr4_smep(regs);
5289 role.ext.cr4_smap = ____is_cr4_smap(regs);
5290 role.ext.cr4_pse = ____is_cr4_pse(regs);
5291
5292 /* PKEY and LA57 are active iff long mode is active. */
5293 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5294 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5295 role.ext.efer_lma = ____is_efer_lma(regs);
5296 return role;
5297 }
5298
5299 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5300 struct kvm_mmu *mmu)
5301 {
5302 const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5303
5304 BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5305 BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5306
5307 if (is_cr0_wp(mmu) == cr0_wp)
5308 return;
5309
5310 mmu->cpu_role.base.cr0_wp = cr0_wp;
5311 reset_guest_paging_metadata(vcpu, mmu);
5312 }
5313
5314 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5315 {
5316 /* tdp_root_level is the architecture-forced level; use it if nonzero. */
5317 if (tdp_root_level)
5318 return tdp_root_level;
5319
5320 /* Use 5-level TDP if and only if it's useful/necessary. */
5321 if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
5322 return 4;
5323
5324 return max_tdp_level;
5325 }
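/*
 * Example (illustrative): with max_tdp_level == 5 and a guest MAXPHYADDR of
 * 46, four TDP levels already cover the guest's physical address space, so
 * KVM uses 4-level paging; a guest MAXPHYADDR of 52 would require 5-level TDP.
 */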
5326
5327 u8 kvm_mmu_get_max_tdp_level(void)
5328 {
5329 return tdp_root_level ? tdp_root_level : max_tdp_level;
5330 }
5331
5332 static union kvm_mmu_page_role
5333 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5334 union kvm_cpu_role cpu_role)
5335 {
5336 union kvm_mmu_page_role role = {0};
5337
5338 role.access = ACC_ALL;
5339 role.cr0_wp = true;
5340 role.efer_nx = true;
5341 role.smm = cpu_role.base.smm;
5342 role.guest_mode = cpu_role.base.guest_mode;
5343 role.ad_disabled = !kvm_ad_enabled();
5344 role.level = kvm_mmu_get_tdp_level(vcpu);
5345 role.direct = true;
5346 role.has_4_byte_gpte = false;
5347
5348 return role;
5349 }
5350
5351 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5352 union kvm_cpu_role cpu_role)
5353 {
5354 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5355 union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5356
5357 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5358 root_role.word == context->root_role.word)
5359 return;
5360
5361 context->cpu_role.as_u64 = cpu_role.as_u64;
5362 context->root_role.word = root_role.word;
5363 context->page_fault = kvm_tdp_page_fault;
5364 context->sync_spte = NULL;
5365 context->get_guest_pgd = get_guest_cr3;
5366 context->get_pdptr = kvm_pdptr_read;
5367 context->inject_page_fault = kvm_inject_page_fault;
5368
5369 if (!is_cr0_pg(context))
5370 context->gva_to_gpa = nonpaging_gva_to_gpa;
5371 else if (is_cr4_pae(context))
5372 context->gva_to_gpa = paging64_gva_to_gpa;
5373 else
5374 context->gva_to_gpa = paging32_gva_to_gpa;
5375
5376 reset_guest_paging_metadata(vcpu, context);
5377 reset_tdp_shadow_zero_bits_mask(context);
5378 }
5379
5380 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5381 union kvm_cpu_role cpu_role,
5382 union kvm_mmu_page_role root_role)
5383 {
5384 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5385 root_role.word == context->root_role.word)
5386 return;
5387
5388 context->cpu_role.as_u64 = cpu_role.as_u64;
5389 context->root_role.word = root_role.word;
5390
5391 if (!is_cr0_pg(context))
5392 nonpaging_init_context(context);
5393 else if (is_cr4_pae(context))
5394 paging64_init_context(context);
5395 else
5396 paging32_init_context(context);
5397
5398 reset_guest_paging_metadata(vcpu, context);
5399 reset_shadow_zero_bits_mask(vcpu, context);
5400 }
5401
5402 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5403 union kvm_cpu_role cpu_role)
5404 {
5405 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5406 union kvm_mmu_page_role root_role;
5407
5408 root_role = cpu_role.base;
5409
5410 /* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5411 root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5412
5413 /*
5414 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5415 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5416 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5417 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5418 * The iTLB multi-hit workaround can be toggled at any time, so assume
5419 * NX can be used by any non-nested shadow MMU to avoid having to reset
5420 * MMU contexts.
5421 */
5422 root_role.efer_nx = true;
5423
5424 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5425 }
5426
5427 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5428 unsigned long cr4, u64 efer, gpa_t nested_cr3)
5429 {
5430 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5431 struct kvm_mmu_role_regs regs = {
5432 .cr0 = cr0,
5433 .cr4 = cr4 & ~X86_CR4_PKE,
5434 .efer = efer,
5435 };
5436 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5437 union kvm_mmu_page_role root_role;
5438
5439 /* NPT requires CR0.PG=1. */
5440 WARN_ON_ONCE(cpu_role.base.direct);
5441
5442 root_role = cpu_role.base;
5443 root_role.level = kvm_mmu_get_tdp_level(vcpu);
5444 if (root_role.level == PT64_ROOT_5LEVEL &&
5445 cpu_role.base.level == PT64_ROOT_4LEVEL)
5446 root_role.passthrough = 1;
5447
5448 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5449 kvm_mmu_new_pgd(vcpu, nested_cr3);
5450 }
5451 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
5452
5453 static union kvm_cpu_role
5454 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5455 bool execonly, u8 level)
5456 {
5457 union kvm_cpu_role role = {0};
5458
5459 /*
5460 * KVM does not support SMM transfer monitors, and consequently does not
5461 * support the "entry to SMM" control either. role.base.smm is always 0.
5462 */
5463 WARN_ON_ONCE(is_smm(vcpu));
5464 role.base.level = level;
5465 role.base.has_4_byte_gpte = false;
5466 role.base.direct = false;
5467 role.base.ad_disabled = !accessed_dirty;
5468 role.base.guest_mode = true;
5469 role.base.access = ACC_ALL;
5470
5471 role.ext.word = 0;
5472 role.ext.execonly = execonly;
5473 role.ext.valid = 1;
5474
5475 return role;
5476 }
5477
5478 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5479 int huge_page_level, bool accessed_dirty,
5480 gpa_t new_eptp)
5481 {
5482 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5483 u8 level = vmx_eptp_page_walk_level(new_eptp);
5484 union kvm_cpu_role new_mode =
5485 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5486 execonly, level);
5487
5488 if (new_mode.as_u64 != context->cpu_role.as_u64) {
5489 /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5490 context->cpu_role.as_u64 = new_mode.as_u64;
5491 context->root_role.word = new_mode.base.word;
5492
5493 context->page_fault = ept_page_fault;
5494 context->gva_to_gpa = ept_gva_to_gpa;
5495 context->sync_spte = ept_sync_spte;
5496
5497 update_permission_bitmask(context, true);
5498 context->pkru_mask = 0;
5499 reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5500 reset_ept_shadow_zero_bits_mask(context, execonly);
5501 }
5502
5503 kvm_mmu_new_pgd(vcpu, new_eptp);
5504 }
5505 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
5506
5507 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5508 union kvm_cpu_role cpu_role)
5509 {
5510 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5511
5512 kvm_init_shadow_mmu(vcpu, cpu_role);
5513
5514 context->get_guest_pgd = get_guest_cr3;
5515 context->get_pdptr = kvm_pdptr_read;
5516 context->inject_page_fault = kvm_inject_page_fault;
5517 }
5518
5519 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5520 union kvm_cpu_role new_mode)
5521 {
5522 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5523
5524 if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5525 return;
5526
5527 g_context->cpu_role.as_u64 = new_mode.as_u64;
5528 g_context->get_guest_pgd = get_guest_cr3;
5529 g_context->get_pdptr = kvm_pdptr_read;
5530 g_context->inject_page_fault = kvm_inject_page_fault;
5531
5532 /*
5533 * L2 page tables are never shadowed, so there is no need to sync
5534 * SPTEs.
5535 */
5536 g_context->sync_spte = NULL;
5537
5538 /*
5539 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5540 * L1's nested page tables (e.g. EPT12). The nested translation
5541 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5542 * L2's page tables as the first level of translation and L1's
5543 * nested page tables as the second level of translation. Basically
5544 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5545 */
5546 if (!is_paging(vcpu))
5547 g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5548 else if (is_long_mode(vcpu))
5549 g_context->gva_to_gpa = paging64_gva_to_gpa;
5550 else if (is_pae(vcpu))
5551 g_context->gva_to_gpa = paging64_gva_to_gpa;
5552 else
5553 g_context->gva_to_gpa = paging32_gva_to_gpa;
5554
5555 reset_guest_paging_metadata(vcpu, g_context);
5556 }
5557
5558 void kvm_init_mmu(struct kvm_vcpu *vcpu)
5559 {
5560 struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5561 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5562
5563 if (mmu_is_nested(vcpu))
5564 init_kvm_nested_mmu(vcpu, cpu_role);
5565 else if (tdp_enabled)
5566 init_kvm_tdp_mmu(vcpu, cpu_role);
5567 else
5568 init_kvm_softmmu(vcpu, cpu_role);
5569 }
5570 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5571
5572 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5573 {
5574 /*
5575 * Invalidate all MMU roles to force them to reinitialize as CPUID
5576 * information is factored into reserved bit calculations.
5577 *
5578 * Correctly handling multiple vCPU models (with respect to paging and
5579 * physical address properties) in a single VM would require tracking
5580 * all relevant CPUID information in kvm_mmu_page_role. That is very
5581 * undesirable as it would increase the memory requirements for
5582 * gfn_write_track (see struct kvm_mmu_page_role comments). For now
5583 * that problem is swept under the rug; KVM's CPUID API is horrific and
5584 * it's all but impossible to solve it without introducing a new API.
5585 */
5586 vcpu->arch.root_mmu.root_role.invalid = 1;
5587 vcpu->arch.guest_mmu.root_role.invalid = 1;
5588 vcpu->arch.nested_mmu.root_role.invalid = 1;
5589 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5590 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5591 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5592 kvm_mmu_reset_context(vcpu);
5593
5594 /*
5595 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
5596 * kvm_arch_vcpu_ioctl().
5597 */
5598 KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
5599 }
5600
5601 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5602 {
5603 kvm_mmu_unload(vcpu);
5604 kvm_init_mmu(vcpu);
5605 }
5606 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5607
5608 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5609 {
5610 int r;
5611
5612 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5613 if (r)
5614 goto out;
5615 r = mmu_alloc_special_roots(vcpu);
5616 if (r)
5617 goto out;
5618 if (vcpu->arch.mmu->root_role.direct)
5619 r = mmu_alloc_direct_roots(vcpu);
5620 else
5621 r = mmu_alloc_shadow_roots(vcpu);
5622 if (r)
5623 goto out;
5624
5625 kvm_mmu_sync_roots(vcpu);
5626
5627 kvm_mmu_load_pgd(vcpu);
5628
5629 /*
5630 * Flush any TLB entries for the new root, the provenance of the root
5631 * is unknown. Even if KVM ensures there are no stale TLB entries
5632 * for a freed root, in theory another hypervisor could have left
5633 * stale entries. Flushing on alloc also allows KVM to skip the TLB
5634 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5635 */
5636 static_call(kvm_x86_flush_tlb_current)(vcpu);
5637 out:
5638 return r;
5639 }
5640
5641 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5642 {
5643 struct kvm *kvm = vcpu->kvm;
5644
5645 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5646 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5647 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5648 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5649 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5650 }
5651
5652 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
5653 {
5654 struct kvm_mmu_page *sp;
5655
5656 if (!VALID_PAGE(root_hpa))
5657 return false;
5658
5659 /*
5660 * When freeing obsolete roots, treat roots as obsolete if they don't
5661 * have an associated shadow page, as it's impossible to determine if
5662 * such roots are fresh or stale. This does mean KVM will get false
5663 * positives and free roots that don't strictly need to be freed, but
5664 * such false positives are relatively rare:
5665 *
5666 * (a) only PAE paging and nested NPT have roots without shadow pages
5667 * (or any shadow paging flavor with a dummy root, see note below)
5668 * (b) remote reloads due to a memslot update obsoletes _all_ roots
5669 * (c) KVM doesn't track previous roots for PAE paging, and the guest
5670 * is unlikely to zap an in-use PGD.
5671 *
5672 * Note! Dummy roots are unique in that they are obsoleted by memslot
5673 * _creation_! See also FNAME(fetch).
5674 */
5675 sp = root_to_sp(root_hpa);
5676 return !sp || is_obsolete_sp(kvm, sp);
5677 }
5678
5679 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
5680 {
5681 unsigned long roots_to_free = 0;
5682 int i;
5683
5684 if (is_obsolete_root(kvm, mmu->root.hpa))
5685 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5686
5687 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5688 if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
5689 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5690 }
5691
5692 if (roots_to_free)
5693 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
5694 }
5695
5696 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
5697 {
5698 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5699 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5700 }
5701
5702 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5703 int *bytes)
5704 {
5705 u64 gentry = 0;
5706 int r;
5707
5708 /*
5709 * Assume that the pte write is on a page table of the same type
5710 * as the current vcpu's paging mode, since we update the sptes only
5711 * when they have the same mode.
5712 */
5713 if (is_pae(vcpu) && *bytes == 4) {
5714 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5715 *gpa &= ~(gpa_t)7;
5716 *bytes = 8;
5717 }
5718
5719 if (*bytes == 4 || *bytes == 8) {
5720 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5721 if (r)
5722 gentry = 0;
5723 }
5724
5725 return gentry;
5726 }
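/*
 * Worked example (illustrative): a 32-bit PAE guest updating a 64-bit gpte
 * in two 4-byte writes, e.g. a write of 4 bytes at gpa 0x1004, is widened
 * here to an 8-byte read at gpa 0x1000 so that the whole gpte is fetched
 * atomically rather than interpreting half of it.
 */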
5727
5728 /*
5729 * If we're seeing too many writes to a page, it may no longer be a page table,
5730 * or we may be forking, in which case it is better to unmap the page.
5731 */
5732 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5733 {
5734 /*
5735 * Skip write-flooding detection for SPs whose level is 1 (4K), because
5736 * they can become unsync, in which case the guest page is not write-protected.
5737 */
5738 if (sp->role.level == PG_LEVEL_4K)
5739 return false;
5740
5741 atomic_inc(&sp->write_flooding_count);
5742 return atomic_read(&sp->write_flooding_count) >= 3;
5743 }
5744
5745 /*
5746 * Misaligned accesses are too much trouble to fix up; also, they usually
5747 * indicate a page is not used as a page table.
5748 */
5749 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5750 int bytes)
5751 {
5752 unsigned offset, pte_size, misaligned;
5753
5754 offset = offset_in_page(gpa);
5755 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5756
5757 /*
5758 * Sometimes, the OS only writes the last byte to update status
5759 * bits; for example, Linux uses the andb instruction in clear_bit().
5760 */
5761 if (!(offset & (pte_size - 1)) && bytes == 1)
5762 return false;
5763
5764 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5765 misaligned |= bytes < 4;
5766
5767 return misaligned;
5768 }
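/*
 * Worked example (illustrative): with 8-byte gptes, a write of 8 bytes at
 * page offset 0x14 gives (0x14 ^ 0x1b) & ~7 == 0x8, i.e. the write straddles
 * two gptes and is treated as misaligned; the same write at offset 0x10
 * gives 0, and bytes >= 4, so it is considered a normal gpte update.
 */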
5769
5770 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5771 {
5772 unsigned page_offset, quadrant;
5773 u64 *spte;
5774 int level;
5775
5776 page_offset = offset_in_page(gpa);
5777 level = sp->role.level;
5778 *nspte = 1;
5779 if (sp->role.has_4_byte_gpte) {
5780 page_offset <<= 1; /* 32->64 */
5781 /*
5782 * A 32-bit pde maps 4MB while the shadow pdes map
5783 * only 2MB. So we need to double the offset again
5784 * and zap two pdes instead of one.
5785 */
5786 if (level == PT32_ROOT_LEVEL) {
5787 page_offset &= ~7; /* kill rounding error */
5788 page_offset <<= 1;
5789 *nspte = 2;
5790 }
5791 quadrant = page_offset >> PAGE_SHIFT;
5792 page_offset &= ~PAGE_MASK;
5793 if (quadrant != sp->role.quadrant)
5794 return NULL;
5795 }
5796
5797 spte = &sp->spt[page_offset / sizeof(*spte)];
5798 return spte;
5799 }
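/*
 * Worked example (illustrative): with 4-byte gptes, a write at page offset
 * 0x800 doubles to 0x1000, so quadrant == 1 and the in-page offset becomes 0;
 * only the shadow page whose role.quadrant is 1 shadows that half of the
 * guest page, and all other sps are skipped by returning NULL.
 */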
5800
5801 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
5802 int bytes)
5803 {
5804 gfn_t gfn = gpa >> PAGE_SHIFT;
5805 struct kvm_mmu_page *sp;
5806 LIST_HEAD(invalid_list);
5807 u64 entry, gentry, *spte;
5808 int npte;
5809 bool flush = false;
5810
5811 /*
5812 * When emulating guest writes, ensure the written value is visible to
5813 * any task that is handling page faults before checking whether or not
5814 * KVM is shadowing a guest PTE. This ensures either KVM will create
5815 * the correct SPTE in the page fault handler, or this task will see
5816 * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in
5817 * account_shadowed().
5818 */
5819 smp_mb();
5820 if (!vcpu->kvm->arch.indirect_shadow_pages)
5821 return;
5822
5823 write_lock(&vcpu->kvm->mmu_lock);
5824
5825 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5826
5827 ++vcpu->kvm->stat.mmu_pte_write;
5828
5829 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
5830 if (detect_write_misaligned(sp, gpa, bytes) ||
5831 detect_write_flooding(sp)) {
5832 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5833 ++vcpu->kvm->stat.mmu_flooded;
5834 continue;
5835 }
5836
5837 spte = get_written_sptes(sp, gpa, &npte);
5838 if (!spte)
5839 continue;
5840
5841 while (npte--) {
5842 entry = *spte;
5843 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5844 if (gentry && sp->role.level != PG_LEVEL_4K)
5845 ++vcpu->kvm->stat.mmu_pde_zapped;
5846 if (is_shadow_present_pte(entry))
5847 flush = true;
5848 ++spte;
5849 }
5850 }
5851 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5852 write_unlock(&vcpu->kvm->mmu_lock);
5853 }
5854
5855 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5856 void *insn, int insn_len)
5857 {
5858 int r, emulation_type = EMULTYPE_PF;
5859 bool direct = vcpu->arch.mmu->root_role.direct;
5860
5861 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5862 return RET_PF_RETRY;
5863
5864 /*
5865 * Except for reserved faults (emulated MMIO is shared-only), set the
5866 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
5867 * current attributes, which are the source of truth for such VMs. Note,
5868 * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
5869 * currently support nested virtualization (among many other things)
5870 * for software-protected VMs.
5871 */
5872 if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
5873 !(error_code & PFERR_RSVD_MASK) &&
5874 vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
5875 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
5876 error_code |= PFERR_PRIVATE_ACCESS;
5877
5878 r = RET_PF_INVALID;
5879 if (unlikely(error_code & PFERR_RSVD_MASK)) {
5880 if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
5881 return -EFAULT;
5882
5883 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5884 if (r == RET_PF_EMULATE)
5885 goto emulate;
5886 }
5887
5888 if (r == RET_PF_INVALID) {
5889 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
5890 &emulation_type);
5891 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
5892 return -EIO;
5893 }
5894
5895 if (r < 0)
5896 return r;
5897 if (r != RET_PF_EMULATE)
5898 return 1;
5899
5900 /*
5901 * Before emulating the instruction, check if the error code
5902 * was due to a RO violation while translating the guest page.
5903 * This can occur when using nested virtualization with nested
5904 * paging in both guests. If true, we simply unprotect the page
5905 * and resume the guest.
5906 */
5907 if (vcpu->arch.mmu->root_role.direct &&
5908 (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5909 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5910 return 1;
5911 }
5912
5913 /*
5914 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5915 * optimistically try to just unprotect the page and let the processor
5916 * re-execute the instruction that caused the page fault. Do not allow
5917 * retrying MMIO emulation, as it's not only pointless but could also
5918 * cause us to enter an infinite loop because the processor will keep
5919 * faulting on the non-existent MMIO address. Retrying an instruction
5920 * from a nested guest is also pointless and dangerous as we are only
5921 * explicitly shadowing L1's page tables, i.e. unprotecting something
5922 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5923 */
5924 if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5925 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5926 emulate:
5927 return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5928 insn_len);
5929 }
5930 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5931
5932 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
5933 {
5934 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
5935 int root_level, leaf, level;
5936
5937 leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
5938 if (unlikely(leaf < 0))
5939 return;
5940
5941 pr_err("%s %llx", msg, gpa);
5942 for (level = root_level; level >= leaf; level--)
5943 pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
5944 pr_cont("\n");
5945 }
5946 EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
5947
5948 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5949 u64 addr, hpa_t root_hpa)
5950 {
5951 struct kvm_shadow_walk_iterator iterator;
5952
5953 vcpu_clear_mmio_info(vcpu, addr);
5954
5955 /*
5956 * Walking and synchronizing SPTEs both assume they are operating in
5957 * the context of the current MMU, and would need to be reworked if
5958 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
5959 */
5960 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
5961 return;
5962
5963 if (!VALID_PAGE(root_hpa))
5964 return;
5965
5966 write_lock(&vcpu->kvm->mmu_lock);
5967 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
5968 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
5969
5970 if (sp->unsync) {
5971 int ret = kvm_sync_spte(vcpu, sp, iterator.index);
5972
5973 if (ret < 0)
5974 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
5975 if (ret)
5976 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
5977 }
5978
5979 if (!sp->unsync_children)
5980 break;
5981 }
5982 write_unlock(&vcpu->kvm->mmu_lock);
5983 }
5984
5985 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5986 u64 addr, unsigned long roots)
5987 {
5988 int i;
5989
5990 WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
5991
5992 /* It's actually a GPA for vcpu->arch.guest_mmu. */
5993 if (mmu != &vcpu->arch.guest_mmu) {
5994 /* INVLPG on a non-canonical address is a NOP according to the SDM. */
5995 if (is_noncanonical_address(addr, vcpu))
5996 return;
5997
5998 static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
5999 }
6000
6001 if (!mmu->sync_spte)
6002 return;
6003
6004 if (roots & KVM_MMU_ROOT_CURRENT)
6005 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6006
6007 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6008 if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6009 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6010 }
6011 }
6012 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
6013
6014 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6015 {
6016 /*
6017 * INVLPG is required to invalidate any global mappings for the VA,
6018 * irrespective of PCID. Blindly sync all roots as it would take
6019 * roughly the same amount of work/time to determine whether any of the
6020 * previous roots have a global mapping.
6021 *
6022 * Mappings not reachable via the current or previous cached roots will
6023 * be synced when switching to that new cr3, so nothing needs to be
6024 * done here for them.
6025 */
6026 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6027 ++vcpu->stat.invlpg;
6028 }
6029 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
6030
6031
6032 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6033 {
6034 struct kvm_mmu *mmu = vcpu->arch.mmu;
6035 unsigned long roots = 0;
6036 uint i;
6037
6038 if (pcid == kvm_get_active_pcid(vcpu))
6039 roots |= KVM_MMU_ROOT_CURRENT;
6040
6041 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6042 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6043 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6044 roots |= KVM_MMU_ROOT_PREVIOUS(i);
6045 }
6046
6047 if (roots)
6048 kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6049 ++vcpu->stat.invlpg;
6050
6051 /*
6052 * Mappings not reachable via the current cr3 or the prev_roots will be
6053 * synced when switching to that cr3, so nothing needs to be done here
6054 * for them.
6055 */
6056 }
6057
6058 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6059 int tdp_max_root_level, int tdp_huge_page_level)
6060 {
6061 tdp_enabled = enable_tdp;
6062 tdp_root_level = tdp_forced_root_level;
6063 max_tdp_level = tdp_max_root_level;
6064
6065 #ifdef CONFIG_X86_64
6066 tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6067 #endif
6068 /*
6069 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6070 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6071 * the kernel is not. But, KVM never creates a page size greater than
6072 * what is used by the kernel for any given HVA, i.e. the kernel's
6073 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6074 */
6075 if (tdp_enabled)
6076 max_huge_page_level = tdp_huge_page_level;
6077 else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6078 max_huge_page_level = PG_LEVEL_1G;
6079 else
6080 max_huge_page_level = PG_LEVEL_2M;
6081 }
6082 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
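/*
 * Usage sketch (illustrative values, not taken from a specific caller): a
 * vendor module that supports two-dimensional paging with 5-level tables and
 * 1GiB mappings would do something like
 *
 *	kvm_configure_mmu(true, 0, 5, PG_LEVEL_1G);
 *
 * while passing enable_tdp=false forces shadow paging and caps the huge page
 * level at what the host CPU supports (1G with gbpages, else 2M).
 */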
6083
6084 /* The return value indicates if tlb flush on all vcpus is needed. */
6085 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
6086 struct kvm_rmap_head *rmap_head,
6087 const struct kvm_memory_slot *slot);
6088
6089 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
6090 const struct kvm_memory_slot *slot,
6091 slot_rmaps_handler fn,
6092 int start_level, int end_level,
6093 gfn_t start_gfn, gfn_t end_gfn,
6094 bool flush_on_yield, bool flush)
6095 {
6096 struct slot_rmap_walk_iterator iterator;
6097
6098 lockdep_assert_held_write(&kvm->mmu_lock);
6099
6100 for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
6101 end_gfn, &iterator) {
6102 if (iterator.rmap)
6103 flush |= fn(kvm, iterator.rmap, slot);
6104
6105 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6106 if (flush && flush_on_yield) {
6107 kvm_flush_remote_tlbs_range(kvm, start_gfn,
6108 iterator.gfn - start_gfn + 1);
6109 flush = false;
6110 }
6111 cond_resched_rwlock_write(&kvm->mmu_lock);
6112 }
6113 }
6114
6115 return flush;
6116 }
6117
6118 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
6119 const struct kvm_memory_slot *slot,
6120 slot_rmaps_handler fn,
6121 int start_level, int end_level,
6122 bool flush_on_yield)
6123 {
6124 return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
6125 slot->base_gfn, slot->base_gfn + slot->npages - 1,
6126 flush_on_yield, false);
6127 }
6128
6129 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
6130 const struct kvm_memory_slot *slot,
6131 slot_rmaps_handler fn,
6132 bool flush_on_yield)
6133 {
6134 return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
6135 }
6136
6137 static void free_mmu_pages(struct kvm_mmu *mmu)
6138 {
6139 if (!tdp_enabled && mmu->pae_root)
6140 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6141 free_page((unsigned long)mmu->pae_root);
6142 free_page((unsigned long)mmu->pml4_root);
6143 free_page((unsigned long)mmu->pml5_root);
6144 }
6145
6146 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6147 {
6148 struct page *page;
6149 int i;
6150
6151 mmu->root.hpa = INVALID_PAGE;
6152 mmu->root.pgd = 0;
6153 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6154 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6155
6156 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6157 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6158 return 0;
6159
6160 /*
6161 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6162 * while the PDP table is a per-vCPU construct that's allocated at MMU
6163 * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
6164 * x86_64. Therefore we need to allocate the PDP table in the first
6165 * 4GB of memory, which happens to fit the DMA32 zone. TDP paging
6166 * generally doesn't use PAE paging and can skip allocating the PDP
6167 * table. The main exception, handled here, is SVM's 32-bit NPT. The
6168 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6169 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6170 */
6171 if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6172 return 0;
6173
6174 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6175 if (!page)
6176 return -ENOMEM;
6177
6178 mmu->pae_root = page_address(page);
6179
6180 /*
6181 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6182 * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so
6183 * that KVM's writes and the CPU's reads get along. Note, this is
6184 * only necessary when using shadow paging, as 64-bit NPT can get at
6185 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6186 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6187 */
6188 if (!tdp_enabled)
6189 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6190 else
6191 WARN_ON_ONCE(shadow_me_value);
6192
6193 for (i = 0; i < 4; ++i)
6194 mmu->pae_root[i] = INVALID_PAE_ROOT;
6195
6196 return 0;
6197 }
6198
6199 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6200 {
6201 int ret;
6202
6203 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6204 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6205
6206 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6207 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6208
6209 vcpu->arch.mmu_shadow_page_cache.init_value =
6210 SHADOW_NONPRESENT_VALUE;
6211 if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6212 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6213
6214 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6215 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6216
6217 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6218 if (ret)
6219 return ret;
6220
6221 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6222 if (ret)
6223 goto fail_allocate_root;
6224
6225 return ret;
6226 fail_allocate_root:
6227 free_mmu_pages(&vcpu->arch.guest_mmu);
6228 return ret;
6229 }
6230
6231 #define BATCH_ZAP_PAGES 10
6232 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6233 {
6234 struct kvm_mmu_page *sp, *node;
6235 int nr_zapped, batch = 0;
6236 bool unstable;
6237
6238 restart:
6239 list_for_each_entry_safe_reverse(sp, node,
6240 &kvm->arch.active_mmu_pages, link) {
6241 /*
6242 * No obsolete valid page exists before a newly created page
6243 * since active_mmu_pages is a FIFO list.
6244 */
6245 if (!is_obsolete_sp(kvm, sp))
6246 break;
6247
6248 /*
6249 * Invalid pages should never land back on the list of active
6250 * pages. Skip the bogus page, otherwise we'll get stuck in an
6251 * infinite loop if the page gets put back on the list (again).
6252 */
6253 if (WARN_ON_ONCE(sp->role.invalid))
6254 continue;
6255
6256 /*
6257 * No need to flush the TLB since we're only zapping shadow
6258 * pages with an obsolete generation number and all vCPUS have
6259 * loaded a new root, i.e. the shadow pages being zapped cannot
6260 * be in active use by the guest.
6261 */
6262 if (batch >= BATCH_ZAP_PAGES &&
6263 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6264 batch = 0;
6265 goto restart;
6266 }
6267
6268 unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6269 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
6270 batch += nr_zapped;
6271
6272 if (unstable)
6273 goto restart;
6274 }
6275
6276 /*
6277 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6278 * to ensure KVM is not in the middle of a lockless shadow page table
6279 * walk, which may reference the pages. The remote TLB flush itself is
6280 * not required and is simply a convenient way to kick vCPUs as needed.
6281 * KVM performs a local TLB flush when allocating a new root (see
6282 * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
6283 * running with an obsolete MMU.
6284 */
6285 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
6286 }
6287
6288 /*
6289 * Fast invalidate all shadow pages and use lock-break technique
6290 * to zap obsolete pages.
6291 *
6292 * It's required when a memslot is being deleted or the VM is being
6293 * destroyed; in these cases, we should ensure that the KVM MMU does
6294 * not use any resource of the slot being deleted, or of any slot,
6295 * after calling the function.
6296 */
6297 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6298 {
6299 lockdep_assert_held(&kvm->slots_lock);
6300
6301 write_lock(&kvm->mmu_lock);
6302 trace_kvm_mmu_zap_all_fast(kvm);
6303
6304 /*
6305 * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is
6306 * held for the entire duration of zapping obsolete pages, it's
6307 * impossible for there to be multiple invalid generations associated
6308 * with *valid* shadow pages at any given time, i.e. there is exactly
6309 * one valid generation and (at most) one invalid generation.
6310 */
6311 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6312
6313 /*
6314 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6315 * invalidating TDP MMU roots must be done while holding mmu_lock for
6316 * write and in the same critical section as making the reload request,
6317 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6318 */
6319 if (tdp_mmu_enabled)
6320 kvm_tdp_mmu_invalidate_all_roots(kvm);
6321
6322 /*
6323 * Notify all vcpus to reload their shadow page tables and flush the TLB.
6324 * Then all vcpus will switch to the new shadow page table with the new
6325 * mmu_valid_gen.
6326 *
6327 * Note: we need to do this under the protection of mmu_lock,
6328 * otherwise, vcpu would purge shadow page but miss tlb flush.
6329 */
6330 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6331
6332 kvm_zap_obsolete_pages(kvm);
6333
6334 write_unlock(&kvm->mmu_lock);
6335
6336 /*
6337 * Zap the invalidated TDP MMU roots; all SPTEs must be dropped before
6338 * returning to the caller, e.g. if the zap is in response to a memslot
6339 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6340 * associated with the deleted memslot once the update completes.
6341 * Deferring the zap until the final reference to the root is put would
6342 * lead to use-after-free.
6343 */
6344 if (tdp_mmu_enabled)
6345 kvm_tdp_mmu_zap_invalidated_roots(kvm);
6346 }
6347
6348 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
6349 {
6350 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
6351 }
6352
6353 void kvm_mmu_init_vm(struct kvm *kvm)
6354 {
6355 kvm->arch.shadow_mmio_value = shadow_mmio_value;
6356 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6357 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6358 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6359 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6360
6361 if (tdp_mmu_enabled)
6362 kvm_mmu_init_tdp_mmu(kvm);
6363
6364 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6365 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6366
6367 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6368
6369 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6370 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6371 }
6372
6373 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6374 {
6375 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6376 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6377 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6378 }
6379
6380 void kvm_mmu_uninit_vm(struct kvm *kvm)
6381 {
6382 if (tdp_mmu_enabled)
6383 kvm_mmu_uninit_tdp_mmu(kvm);
6384
6385 mmu_free_vm_memory_caches(kvm);
6386 }
6387
6388 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6389 {
6390 const struct kvm_memory_slot *memslot;
6391 struct kvm_memslots *slots;
6392 struct kvm_memslot_iter iter;
6393 bool flush = false;
6394 gfn_t start, end;
6395 int i;
6396
6397 if (!kvm_memslots_have_rmaps(kvm))
6398 return flush;
6399
6400 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6401 slots = __kvm_memslots(kvm, i);
6402
6403 kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6404 memslot = iter.slot;
6405 start = max(gfn_start, memslot->base_gfn);
6406 end = min(gfn_end, memslot->base_gfn + memslot->npages);
6407 if (WARN_ON_ONCE(start >= end))
6408 continue;
6409
6410 flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
6411 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
6412 start, end - 1, true, flush);
6413 }
6414 }
6415
6416 return flush;
6417 }
6418
6419 /*
6420 * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
6421 * (not including it)
6422 */
6423 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6424 {
6425 bool flush;
6426
6427 if (WARN_ON_ONCE(gfn_end <= gfn_start))
6428 return;
6429
6430 write_lock(&kvm->mmu_lock);
6431
6432 kvm_mmu_invalidate_begin(kvm);
6433
6434 kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6435
6436 flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6437
6438 if (tdp_mmu_enabled)
6439 flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6440
6441 if (flush)
6442 kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6443
6444 kvm_mmu_invalidate_end(kvm);
6445
6446 write_unlock(&kvm->mmu_lock);
6447 }
6448
6449 static bool slot_rmap_write_protect(struct kvm *kvm,
6450 struct kvm_rmap_head *rmap_head,
6451 const struct kvm_memory_slot *slot)
6452 {
6453 return rmap_write_protect(rmap_head, false);
6454 }
6455
6456 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6457 const struct kvm_memory_slot *memslot,
6458 int start_level)
6459 {
6460 if (kvm_memslots_have_rmaps(kvm)) {
6461 write_lock(&kvm->mmu_lock);
6462 walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6463 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6464 write_unlock(&kvm->mmu_lock);
6465 }
6466
6467 if (tdp_mmu_enabled) {
6468 read_lock(&kvm->mmu_lock);
6469 kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6470 read_unlock(&kvm->mmu_lock);
6471 }
6472 }
6473
6474 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6475 {
6476 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6477 }
6478
6479 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
6480 {
6481 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6482 return true;
6483
6484 /*
6485 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
6486 * to split a single huge page. Calculating how many are actually needed
6487 * is possible but not worth the complexity.
6488 */
6489 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6490 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6491 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6492 }
6493
6494 static int topup_split_caches(struct kvm *kvm)
6495 {
6496 /*
6497 * Allocating rmap list entries when splitting huge pages for nested
6498 * MMUs is uncommon as KVM needs to use a list if and only if there is
6499 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6500 * aliased by multiple L2 gfns and/or from multiple nested roots with
6501 * different roles. Aliasing gfns when using TDP is atypical for VMMs;
6502 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
6503 * but aliasing rarely occurs post-boot or for many gfns. If there is
6504 * only one rmap entry, rmap->val points directly at that one entry and
6505 * doesn't need to allocate a list. Buffer the cache by the default
6506 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6507 * encounters an aliased gfn or two.
6508 */
6509 const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
6510 KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
6511 int r;
6512
6513 lockdep_assert_held(&kvm->slots_lock);
6514
6515 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6516 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
6517 if (r)
6518 return r;
6519
6520 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6521 if (r)
6522 return r;
6523
6524 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6525 }
6526
6527 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
6528 {
6529 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6530 struct shadow_page_caches caches = {};
6531 union kvm_mmu_page_role role;
6532 unsigned int access;
6533 gfn_t gfn;
6534
6535 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6536 access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6537
6538 /*
6539 * Note, huge page splitting always uses direct shadow pages, regardless
6540 * of whether the huge page itself is mapped by a direct or indirect
6541 * shadow page, since the huge page region itself is being directly
6542 * mapped with smaller pages.
6543 */
6544 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6545
6546 /* Direct SPs do not require a shadowed_info_cache. */
6547 caches.page_header_cache = &kvm->arch.split_page_header_cache;
6548 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6549
6550 /* Safe to pass NULL for vCPU since requesting a direct SP. */
6551 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6552 }
6553
6554 static void shadow_mmu_split_huge_page(struct kvm *kvm,
6555 const struct kvm_memory_slot *slot,
6556 u64 *huge_sptep)
6557
6558 {
6559 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6560 u64 huge_spte = READ_ONCE(*huge_sptep);
6561 struct kvm_mmu_page *sp;
6562 bool flush = false;
6563 u64 *sptep, spte;
6564 gfn_t gfn;
6565 int index;
6566
6567 sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
6568
6569 for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
6570 sptep = &sp->spt[index];
6571 gfn = kvm_mmu_page_get_gfn(sp, index);
6572
6573 /*
6574 * The SP may already have populated SPTEs, e.g. if this huge
6575 * page is aliased by multiple sptes with the same access
6576 * permissions. These entries are guaranteed to map the same
6577 * gfn-to-pfn translation since the SP is direct, so no need to
6578 * modify them.
6579 *
6580 * However, if a given SPTE points to a lower level page table,
6581 * that lower level page table may only be partially populated.
6582 * Installing such SPTEs would effectively unmap a portion of the
6583 * huge page. Unmapping guest memory always requires a TLB flush
6584 * since a subsequent operation on the unmapped regions would
6585 * fail to detect the need to flush.
6586 */
6587 if (is_shadow_present_pte(*sptep)) {
6588 flush |= !is_last_spte(*sptep, sp->role.level);
6589 continue;
6590 }
6591
6592 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6593 mmu_spte_set(sptep, spte);
6594 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6595 }
6596
6597 __link_shadow_page(kvm, cache, huge_sptep, sp, flush);
6598 }
6599
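/*
 * Attempt to split the huge page mapped by @huge_sptep.  Returns 0 on
 * success, -EAGAIN if mmu_lock was dropped to top up the split caches (the
 * caller must restart its rmap walk), or another -errno on failure, e.g.
 * -ENOSPC if KVM is running low on MMU pages.
 */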
6600 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
6601 const struct kvm_memory_slot *slot,
6602 u64 *huge_sptep)
6603 {
6604 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6605 int level, r = 0;
6606 gfn_t gfn;
6607 u64 spte;
6608
6609 /* Grab information for the tracepoint before dropping the MMU lock. */
6610 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6611 level = huge_sp->role.level;
6612 spte = *huge_sptep;
6613
6614 if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
6615 r = -ENOSPC;
6616 goto out;
6617 }
6618
6619 if (need_topup_split_caches_or_resched(kvm)) {
6620 write_unlock(&kvm->mmu_lock);
6621 cond_resched();
6622 /*
6623 * If the topup succeeds, return -EAGAIN to indicate that the
6624 * rmap iterator should be restarted because the MMU lock was
6625 * dropped.
6626 */
6627 r = topup_split_caches(kvm) ?: -EAGAIN;
6628 write_lock(&kvm->mmu_lock);
6629 goto out;
6630 }
6631
6632 shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
6633
6634 out:
6635 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6636 return r;
6637 }
6638
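/*
 * Try to split all huge pages tracked by the given rmap.  The walk is
 * restarted whenever a split drops mmu_lock.  Always returns false, i.e.
 * never asks the caller to flush TLBs.
 */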
6639 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6640 struct kvm_rmap_head *rmap_head,
6641 const struct kvm_memory_slot *slot)
6642 {
6643 struct rmap_iterator iter;
6644 struct kvm_mmu_page *sp;
6645 u64 *huge_sptep;
6646 int r;
6647
6648 restart:
6649 for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
6650 sp = sptep_to_sp(huge_sptep);
6651
6652 /* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
6653 if (WARN_ON_ONCE(!sp->role.guest_mode))
6654 continue;
6655
6656 /* The rmaps should never contain non-leaf SPTEs. */
6657 if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
6658 continue;
6659
6660 /* SPs with level >PG_LEVEL_4K should never be unsync. */
6661 if (WARN_ON_ONCE(sp->unsync))
6662 continue;
6663
6664 /* Don't bother splitting huge pages on invalid SPs. */
6665 if (sp->role.invalid)
6666 continue;
6667
6668 r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
6669
6670 /*
6671 * The split succeeded or needs to be retried because the MMU
6672 * lock was dropped. Either way, restart the iterator to get it
6673 * back into a consistent state.
6674 */
6675 if (!r || r == -EAGAIN)
6676 goto restart;
6677
6678 /* The split failed and shouldn't be retried (e.g. -ENOMEM). */
6679 break;
6680 }
6681
6682 return false;
6683 }
6684
6685 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6686 const struct kvm_memory_slot *slot,
6687 gfn_t start, gfn_t end,
6688 int target_level)
6689 {
6690 int level;
6691
6692 /*
6693 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
6694 * down to the target level. This ensures pages are recursively split
6695 * all the way to the target level. There's no need to split pages
6696 * already at the target level.
6697 */
6698 for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
6699 __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
6700 level, level, start, end - 1, true, false);
6701 }
6702
6703 /* Must be called with the mmu_lock held in write-mode. */
6704 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
6705 const struct kvm_memory_slot *memslot,
6706 u64 start, u64 end,
6707 int target_level)
6708 {
6709 if (!tdp_mmu_enabled)
6710 return;
6711
6712 if (kvm_memslots_have_rmaps(kvm))
6713 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6714
6715 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
6716
6717 /*
6718 * A TLB flush is unnecessary at this point for the same reasons as in
6719 * kvm_mmu_slot_try_split_huge_pages().
6720 */
6721 }
6722
6723 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
6724 const struct kvm_memory_slot *memslot,
6725 int target_level)
6726 {
6727 u64 start = memslot->base_gfn;
6728 u64 end = start + memslot->npages;
6729
6730 if (!tdp_mmu_enabled)
6731 return;
6732
6733 if (kvm_memslots_have_rmaps(kvm)) {
6734 write_lock(&kvm->mmu_lock);
6735 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6736 write_unlock(&kvm->mmu_lock);
6737 }
6738
6739 read_lock(&kvm->mmu_lock);
6740 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
6741 read_unlock(&kvm->mmu_lock);
6742
6743 /*
6744 * No TLB flush is necessary here. KVM will flush TLBs after
6745 * write-protecting and/or clearing dirty on the newly split SPTEs to
6746 * ensure that guest writes are reflected in the dirty log before the
6747 * ioctl to enable dirty logging on this memslot completes. Since the
6748 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
6749 * safe for KVM to decide if a TLB flush is necessary based on the split
6750 * SPTEs.
6751 */
6752 }
6753
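/*
 * Zap SPTEs that map memory at a lower level than the backing memory now
 * allows so that the range can be refaulted as a huge page.  Returns true
 * if the caller needs to flush TLBs, i.e. if an SPTE was zapped without a
 * targeted remote flush.
 */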
6754 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
6755 struct kvm_rmap_head *rmap_head,
6756 const struct kvm_memory_slot *slot)
6757 {
6758 u64 *sptep;
6759 struct rmap_iterator iter;
6760 int need_tlb_flush = 0;
6761 struct kvm_mmu_page *sp;
6762
6763 restart:
6764 for_each_rmap_spte(rmap_head, &iter, sptep) {
6765 sp = sptep_to_sp(sptep);
6766
6767 /*
6768 * We cannot do huge page mapping for indirect shadow pages,
6769 * which are found on the last rmap (level = 1) when not using
6770 * tdp; such shadow pages are synced with the page table in
6771 * the guest, and the guest page table is using 4K page size
6772 * mapping if the indirect sp has level = 1.
6773 */
6774 if (sp->role.direct &&
6775 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6776 PG_LEVEL_NUM)) {
6777 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
6778
6779 if (kvm_available_flush_remote_tlbs_range())
6780 kvm_flush_remote_tlbs_sptep(kvm, sptep);
6781 else
6782 need_tlb_flush = 1;
6783
6784 goto restart;
6785 }
6786 }
6787
6788 return need_tlb_flush;
6789 }
6790
6791 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
6792 const struct kvm_memory_slot *slot)
6793 {
6794 /*
6795 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
6796 * pages that are already mapped at the maximum hugepage level.
6797 */
6798 if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
6799 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
6800 kvm_flush_remote_tlbs_memslot(kvm, slot);
6801 }
6802
6803 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
6804 const struct kvm_memory_slot *slot)
6805 {
6806 if (kvm_memslots_have_rmaps(kvm)) {
6807 write_lock(&kvm->mmu_lock);
6808 kvm_rmap_zap_collapsible_sptes(kvm, slot);
6809 write_unlock(&kvm->mmu_lock);
6810 }
6811
6812 if (tdp_mmu_enabled) {
6813 read_lock(&kvm->mmu_lock);
6814 kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
6815 read_unlock(&kvm->mmu_lock);
6816 }
6817 }
6818
6819 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
6820 const struct kvm_memory_slot *memslot)
6821 {
6822 if (kvm_memslots_have_rmaps(kvm)) {
6823 write_lock(&kvm->mmu_lock);
6824 /*
6825 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
6826 * supports dirty logging at a 4k granularity.
6827 */
6828 walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
6829 write_unlock(&kvm->mmu_lock);
6830 }
6831
6832 if (tdp_mmu_enabled) {
6833 read_lock(&kvm->mmu_lock);
6834 kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
6835 read_unlock(&kvm->mmu_lock);
6836 }
6837
6838 /*
6839 * The caller will flush the TLBs after this function returns.
6840 *
6841 * It's also safe to flush TLBs out of mmu lock here as currently this
6842 * function is only used for dirty logging, in which case flushing TLB
6843 * out of mmu lock also guarantees no dirty pages will be lost in
6844 * dirty_bitmap.
6845 */
6846 }
6847
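/*
 * Zap all shadow pages in the VM, both those tracked on active_mmu_pages
 * and, if enabled, those owned by the TDP MMU.
 */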
6848 static void kvm_mmu_zap_all(struct kvm *kvm)
6849 {
6850 struct kvm_mmu_page *sp, *node;
6851 LIST_HEAD(invalid_list);
6852 int ign;
6853
6854 write_lock(&kvm->mmu_lock);
6855 restart:
6856 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6857 if (WARN_ON_ONCE(sp->role.invalid))
6858 continue;
6859 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
6860 goto restart;
6861 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6862 goto restart;
6863 }
6864
6865 kvm_mmu_commit_zap_page(kvm, &invalid_list);
6866
6867 if (tdp_mmu_enabled)
6868 kvm_tdp_mmu_zap_all(kvm);
6869
6870 write_unlock(&kvm->mmu_lock);
6871 }
6872
6873 void kvm_arch_flush_shadow_all(struct kvm *kvm)
6874 {
6875 kvm_mmu_zap_all(kvm);
6876 }
6877
6878 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
6879 struct kvm_memory_slot *slot)
6880 {
6881 kvm_mmu_zap_all_fast(kvm);
6882 }
6883
6884 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
6885 {
6886 WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
6887
6888 gen &= MMIO_SPTE_GEN_MASK;
6889
6890 /*
6891 * Generation numbers are incremented in multiples of the number of
6892 * address spaces in order to provide unique generations across all
6893 * address spaces. Strip what is effectively the address space
6894 * modifier prior to checking for a wrap of the MMIO generation so
6895 * that a wrap in any address space is detected.
6896 */
6897 gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
6898
6899 /*
6900 * The very rare case: if the MMIO generation number has wrapped,
6901 * zap all shadow pages.
6902 */
6903 if (unlikely(gen == 0)) {
6904 kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
6905 kvm_mmu_zap_all_fast(kvm);
6906 }
6907 }
6908
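/*
 * Shrinker callback: reclaim MMU pages from at most one VM per invocation,
 * either committing already-zapped obsolete pages or zapping the oldest
 * shadow pages.  The chosen VM is moved to the tail of vm_list so that
 * subsequent invocations target other VMs.
 */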
6909 static unsigned long mmu_shrink_scan(struct shrinker *shrink,
6910 struct shrink_control *sc)
6911 {
6912 struct kvm *kvm;
6913 int nr_to_scan = sc->nr_to_scan;
6914 unsigned long freed = 0;
6915
6916 mutex_lock(&kvm_lock);
6917
6918 list_for_each_entry(kvm, &vm_list, vm_list) {
6919 int idx;
6920 LIST_HEAD(invalid_list);
6921
6922 /*
6923 * Never scan more than sc->nr_to_scan VM instances.
6924 * Will not hit this condition practically since we do not try
6925 * to shrink more than one VM and it is very unlikely to see
6926 * !n_used_mmu_pages so many times.
6927 */
6928 if (!nr_to_scan--)
6929 break;
6930 /*
6931 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
6932 * here. We may skip a VM instance erroneously, but we do not
6933 * want to shrink a VM that only started to populate its MMU
6934 * anyway.
6935 */
6936 if (!kvm->arch.n_used_mmu_pages &&
6937 !kvm_has_zapped_obsolete_pages(kvm))
6938 continue;
6939
6940 idx = srcu_read_lock(&kvm->srcu);
6941 write_lock(&kvm->mmu_lock);
6942
6943 if (kvm_has_zapped_obsolete_pages(kvm)) {
6944 kvm_mmu_commit_zap_page(kvm,
6945 &kvm->arch.zapped_obsolete_pages);
6946 goto unlock;
6947 }
6948
6949 freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
6950
6951 unlock:
6952 write_unlock(&kvm->mmu_lock);
6953 srcu_read_unlock(&kvm->srcu, idx);
6954
6955 /*
6956 * unfair on small ones
6957 * per-vm shrinkers cry out
6958 * sadness comes quickly
6959 */
6960 list_move_tail(&kvm->vm_list, &vm_list);
6961 break;
6962 }
6963
6964 mutex_unlock(&kvm_lock);
6965 return freed;
6966 }
6967
6968 static unsigned long mmu_shrink_count(struct shrinker *shrink,
6969 struct shrink_control *sc)
6970 {
6971 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6972 }
6973
6974 static struct shrinker *mmu_shrinker;
6975
6976 static void mmu_destroy_caches(void)
6977 {
6978 kmem_cache_destroy(pte_list_desc_cache);
6979 kmem_cache_destroy(mmu_page_header_cache);
6980 }
6981
6982 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
6983 {
6984 if (nx_hugepage_mitigation_hard_disabled)
6985 return sysfs_emit(buffer, "never\n");
6986
6987 return param_get_bool(buffer, kp);
6988 }
6989
6990 static bool get_nx_auto_mode(void)
6991 {
6992 /* Return true when CPU has the bug, and mitigations are ON */
6993 return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6994 }
6995
6996 static void __set_nx_huge_pages(bool val)
6997 {
6998 nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6999 }
7000
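/*
 * Set the NX huge page mitigation.  "off", "force" and "auto" resolve to a
 * boolean ("auto" enables the mitigation only if the CPU is affected by the
 * iTLB multihit bug), while "never" hard-disables the mitigation and is
 * rejected if any VM already exists.  Toggling the value zaps all shadow
 * pages in every VM and wakes the recovery threads.
 */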
7001 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7002 {
7003 bool old_val = nx_huge_pages;
7004 bool new_val;
7005
7006 if (nx_hugepage_mitigation_hard_disabled)
7007 return -EPERM;
7008
7009 /* In "auto" mode deploy workaround only if CPU has the bug. */
7010 if (sysfs_streq(val, "off")) {
7011 new_val = 0;
7012 } else if (sysfs_streq(val, "force")) {
7013 new_val = 1;
7014 } else if (sysfs_streq(val, "auto")) {
7015 new_val = get_nx_auto_mode();
7016 } else if (sysfs_streq(val, "never")) {
7017 new_val = 0;
7018
7019 mutex_lock(&kvm_lock);
7020 if (!list_empty(&vm_list)) {
7021 mutex_unlock(&kvm_lock);
7022 return -EBUSY;
7023 }
7024 nx_hugepage_mitigation_hard_disabled = true;
7025 mutex_unlock(&kvm_lock);
7026 } else if (kstrtobool(val, &new_val) < 0) {
7027 return -EINVAL;
7028 }
7029
7030 __set_nx_huge_pages(new_val);
7031
7032 if (new_val != old_val) {
7033 struct kvm *kvm;
7034
7035 mutex_lock(&kvm_lock);
7036
7037 list_for_each_entry(kvm, &vm_list, vm_list) {
7038 mutex_lock(&kvm->slots_lock);
7039 kvm_mmu_zap_all_fast(kvm);
7040 mutex_unlock(&kvm->slots_lock);
7041
7042 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7043 }
7044 mutex_unlock(&kvm_lock);
7045 }
7046
7047 return 0;
7048 }
7049
7050 /*
7051 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7052 * its default value of -1 is technically undefined behavior for a boolean.
7053 * Forward the module init call to SPTE code so that it too can handle module
7054 * params that need to be resolved/snapshot.
7055 */
7056 void __init kvm_mmu_x86_module_init(void)
7057 {
7058 if (nx_huge_pages == -1)
7059 __set_nx_huge_pages(get_nx_auto_mode());
7060
7061 /*
7062 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7063 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7064 * when the vendor module is loaded.
7065 */
7066 tdp_mmu_allowed = tdp_mmu_enabled;
7067
7068 kvm_mmu_spte_module_init();
7069 }
7070
7071 /*
7072 * The bulk of the MMU initialization is deferred until the vendor module is
7073 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7074 * to be reset when a potentially different vendor module is loaded.
7075 */
7076 int kvm_mmu_vendor_module_init(void)
7077 {
7078 int ret = -ENOMEM;
7079
7080 /*
7081 * MMU roles use union aliasing which is, generally speaking, an
7082 * undefined behavior. However, we supposedly know how compilers behave
7083 * and the current status quo is unlikely to change. Guardians below are
7084 * supposed to let us know if the assumption becomes false.
7085 */
7086 BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7087 BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7088 BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7089
7090 kvm_mmu_reset_all_pte_masks();
7091
7092 pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7093 if (!pte_list_desc_cache)
7094 goto out;
7095
7096 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7097 sizeof(struct kvm_mmu_page),
7098 0, SLAB_ACCOUNT, NULL);
7099 if (!mmu_page_header_cache)
7100 goto out;
7101
7102 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
7103 goto out;
7104
7105 mmu_shrinker = shrinker_alloc(0, "x86-mmu");
7106 if (!mmu_shrinker)
7107 goto out_shrinker;
7108
7109 mmu_shrinker->count_objects = mmu_shrink_count;
7110 mmu_shrinker->scan_objects = mmu_shrink_scan;
7111 mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
7112
7113 shrinker_register(mmu_shrinker);
7114
7115 return 0;
7116
7117 out_shrinker:
7118 percpu_counter_destroy(&kvm_total_used_mmu_pages);
7119 out:
7120 mmu_destroy_caches();
7121 return ret;
7122 }
7123
7124 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7125 {
7126 kvm_mmu_unload(vcpu);
7127 free_mmu_pages(&vcpu->arch.root_mmu);
7128 free_mmu_pages(&vcpu->arch.guest_mmu);
7129 mmu_free_memory_caches(vcpu);
7130 }
7131
7132 void kvm_mmu_vendor_module_exit(void)
7133 {
7134 mmu_destroy_caches();
7135 percpu_counter_destroy(&kvm_total_used_mmu_pages);
7136 shrinker_free(mmu_shrinker);
7137 }
7138
7139 /*
7140 * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7141 * select a halving time of 1 hour". Returns true if recovery is enabled.
7142 */
7143 static bool calc_nx_huge_pages_recovery_period(uint *period)
7144 {
7145 /*
7146 * Use READ_ONCE to get the params, this may be called outside of the
7147 * param setters, e.g. by the kthread to compute its next timeout.
7148 */
7149 bool enabled = READ_ONCE(nx_huge_pages);
7150 uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7151
7152 if (!enabled || !ratio)
7153 return false;
7154
7155 *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7156 if (!*period) {
7157 /* Make sure the period is not less than one second. */
7158 ratio = min(ratio, 3600u);
7159 *period = 60 * 60 * 1000 / ratio;
7160 }
7161 return true;
7162 }
7163
7164 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7165 {
7166 bool was_recovery_enabled, is_recovery_enabled;
7167 uint old_period, new_period;
7168 int err;
7169
7170 if (nx_hugepage_mitigation_hard_disabled)
7171 return -EPERM;
7172
7173 was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7174
7175 err = param_set_uint(val, kp);
7176 if (err)
7177 return err;
7178
7179 is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7180
7181 if (is_recovery_enabled &&
7182 (!was_recovery_enabled || old_period > new_period)) {
7183 struct kvm *kvm;
7184
7185 mutex_lock(&kvm_lock);
7186
7187 list_for_each_entry(kvm, &vm_list, vm_list)
7188 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7189
7190 mutex_unlock(&kvm_lock);
7191 }
7192
7193 return err;
7194 }
7195
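/*
 * Zap a fraction (1 / nx_huge_pages_recovery_ratio) of the shadow pages that
 * were installed in lieu of an NX huge page so that the zapped ranges can be
 * rebuilt as huge pages.  Pages in memslots with dirty logging enabled are
 * only unaccounted, as they would be refaulted as 4KiB pages anyway.
 */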
7196 static void kvm_recover_nx_huge_pages(struct kvm *kvm)
7197 {
7198 unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
7199 struct kvm_memory_slot *slot;
7200 int rcu_idx;
7201 struct kvm_mmu_page *sp;
7202 unsigned int ratio;
7203 LIST_HEAD(invalid_list);
7204 bool flush = false;
7205 ulong to_zap;
7206
7207 rcu_idx = srcu_read_lock(&kvm->srcu);
7208 write_lock(&kvm->mmu_lock);
7209
7210 /*
7211 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7212 * be done under RCU protection, because the pages are freed via RCU
7213 * callback.
7214 */
7215 rcu_read_lock();
7216
7217 ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7218 to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
7219 for ( ; to_zap; --to_zap) {
7220 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7221 break;
7222
7223 /*
7224 * We use a separate list instead of just using active_mmu_pages
7225 * because the number of shadow pages that can be replaced with an
7226 * NX huge page is expected to be relatively small compared to
7227 * the total number of shadow pages. And because the TDP MMU
7228 * doesn't use active_mmu_pages.
7229 */
7230 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7231 struct kvm_mmu_page,
7232 possible_nx_huge_page_link);
7233 WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7234 WARN_ON_ONCE(!sp->role.direct);
7235
7236 /*
7237 * Unaccount and do not attempt to recover any NX Huge Pages
7238 * that are being dirty tracked, as they would just be faulted
7239 * back in as 4KiB pages. The NX Huge Pages in this slot will be
7240 * recovered, along with all the other huge pages in the slot,
7241 * when dirty logging is disabled.
7242 *
7243 * Since gfn_to_memslot() is relatively expensive, it helps to
7244 * skip it if the test cannot possibly return true. On the
7245 * other hand, if any memslot has logging enabled, chances are
7246 * good that all of them do, in which case unaccount_nx_huge_page()
7247 * is much cheaper than zapping the page.
7248 *
7249 * If a memslot update is in progress, reading an incorrect value
7250 * of kvm->nr_memslots_dirty_logging is not a problem: if it is
7251 * becoming zero, gfn_to_memslot() will be done unnecessarily; if
7252 * it is becoming nonzero, the page will be zapped unnecessarily.
7253 * Either way, this only affects efficiency in racy situations,
7254 * and not correctness.
7255 */
7256 slot = NULL;
7257 if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
7258 struct kvm_memslots *slots;
7259
7260 slots = kvm_memslots_for_spte_role(kvm, sp->role);
7261 slot = __gfn_to_memslot(slots, sp->gfn);
7262 WARN_ON_ONCE(!slot);
7263 }
7264
7265 if (slot && kvm_slot_dirty_track_enabled(slot))
7266 unaccount_nx_huge_page(kvm, sp);
7267 else if (is_tdp_mmu_page(sp))
7268 flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
7269 else
7270 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7271 WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7272
7273 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7274 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7275 rcu_read_unlock();
7276
7277 cond_resched_rwlock_write(&kvm->mmu_lock);
7278 flush = false;
7279
7280 rcu_read_lock();
7281 }
7282 }
7283 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7284
7285 rcu_read_unlock();
7286
7287 write_unlock(&kvm->mmu_lock);
7288 srcu_read_unlock(&kvm->srcu, rcu_idx);
7289 }
7290
7291 static long get_nx_huge_page_recovery_timeout(u64 start_time)
7292 {
7293 bool enabled;
7294 uint period;
7295
7296 enabled = calc_nx_huge_pages_recovery_period(&period);
7297
7298 return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
7299 : MAX_SCHEDULE_TIMEOUT;
7300 }
7301
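/*
 * Body of the per-VM NX huge page recovery kthread: sleep for the computed
 * recovery period (or indefinitely if recovery is disabled), recover a batch
 * of NX huge pages, and repeat until the thread is stopped.
 */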
7302 static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
7303 {
7304 u64 start_time;
7305 long remaining_time;
7306
7307 while (true) {
7308 start_time = get_jiffies_64();
7309 remaining_time = get_nx_huge_page_recovery_timeout(start_time);
7310
7311 set_current_state(TASK_INTERRUPTIBLE);
7312 while (!kthread_should_stop() && remaining_time > 0) {
7313 schedule_timeout(remaining_time);
7314 remaining_time = get_nx_huge_page_recovery_timeout(start_time);
7315 set_current_state(TASK_INTERRUPTIBLE);
7316 }
7317
7318 set_current_state(TASK_RUNNING);
7319
7320 if (kthread_should_stop())
7321 return 0;
7322
7323 kvm_recover_nx_huge_pages(kvm);
7324 }
7325 }
7326
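/* Spawn the worker thread that periodically recovers NX huge pages. */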
7327 int kvm_mmu_post_init_vm(struct kvm *kvm)
7328 {
7329 int err;
7330
7331 if (nx_hugepage_mitigation_hard_disabled)
7332 return 0;
7333
7334 err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
7335 "kvm-nx-lpage-recovery",
7336 &kvm->arch.nx_huge_page_recovery_thread);
7337 if (!err)
7338 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7339
7340 return err;
7341 }
7342
7343 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7344 {
7345 if (kvm->arch.nx_huge_page_recovery_thread)
7346 kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
7347 }
7348
7349 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
7350 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7351 struct kvm_gfn_range *range)
7352 {
7353 /*
7354 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
7355 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7356 * can simply ignore such slots. But if userspace is making memory
7357 * PRIVATE, then KVM must prevent the guest from accessing the memory
7358 * as shared. And if userspace is making memory SHARED and this point
7359 * is reached, then at least one page within the range was previously
7360 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7361 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7362 * a hugepage can be used for affected ranges.
7363 */
7364 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7365 return false;
7366
7367 return kvm_unmap_gfn_range(kvm, range);
7368 }
7369
7370 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7371 int level)
7372 {
7373 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7374 }
7375
7376 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7377 int level)
7378 {
7379 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7380 }
7381
7382 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7383 int level)
7384 {
7385 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7386 }
7387
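/*
 * Check whether every page in the hugepage at @gfn/@level has the memory
 * attributes @attrs.  2M ranges are checked directly; larger ranges consume
 * the next lower level's mixed-attribute tracking.
 */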
7388 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7389 gfn_t gfn, int level, unsigned long attrs)
7390 {
7391 const unsigned long start = gfn;
7392 const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7393
7394 if (level == PG_LEVEL_2M)
7395 return kvm_range_has_memory_attributes(kvm, start, end, attrs);
7396
7397 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7398 if (hugepage_test_mixed(slot, gfn, level - 1) ||
7399 attrs != kvm_get_memory_attributes(kvm, gfn))
7400 return false;
7401 }
7402 return true;
7403 }
7404
7405 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7406 struct kvm_gfn_range *range)
7407 {
7408 unsigned long attrs = range->arg.attributes;
7409 struct kvm_memory_slot *slot = range->slot;
7410 int level;
7411
7412 lockdep_assert_held_write(&kvm->mmu_lock);
7413 lockdep_assert_held(&kvm->slots_lock);
7414
7415 /*
7416 * Calculate which ranges can be mapped with hugepages even if the slot
7417 * can't map memory PRIVATE. KVM mustn't create a SHARED hugepage over
7418 * a range that has PRIVATE GFNs, and conversely converting a range to
7419 * SHARED may now allow hugepages.
7420 */
7421 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7422 return false;
7423
7424 /*
7425 * The sequence matters here: upper levels consume the result of lower
7426 * level's scanning.
7427 */
7428 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7429 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7430 gfn_t gfn = gfn_round_for_level(range->start, level);
7431
7432 /* Process the head page if it straddles the range. */
7433 if (gfn != range->start || gfn + nr_pages > range->end) {
7434 /*
7435 * Skip mixed tracking if the aligned gfn isn't covered
7436 * by the memslot, KVM can't use a hugepage due to the
7437 * misaligned address regardless of memory attributes.
7438 */
7439 if (gfn >= slot->base_gfn &&
7440 gfn + nr_pages <= slot->base_gfn + slot->npages) {
7441 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7442 hugepage_clear_mixed(slot, gfn, level);
7443 else
7444 hugepage_set_mixed(slot, gfn, level);
7445 }
7446 gfn += nr_pages;
7447 }
7448
7449 /*
7450 * Pages entirely covered by the range are guaranteed to have
7451 * only the attributes which were just set.
7452 */
7453 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7454 hugepage_clear_mixed(slot, gfn, level);
7455
7456 /*
7457 * Process the last tail page if it straddles the range and is
7458 * contained by the memslot. Like the head page, KVM can't
7459 * create a hugepage if the slot size is misaligned.
7460 */
7461 if (gfn < range->end &&
7462 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7463 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7464 hugepage_clear_mixed(slot, gfn, level);
7465 else
7466 hugepage_set_mixed(slot, gfn, level);
7467 }
7468 }
7469 return false;
7470 }
7471
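/*
 * Initialize mixed-attribute tracking for a new memslot by checking every
 * potential hugepage that is fully contained by the slot.
 */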
7472 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
7473 struct kvm_memory_slot *slot)
7474 {
7475 int level;
7476
7477 if (!kvm_arch_has_private_mem(kvm))
7478 return;
7479
7480 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7481 /*
7482 * Don't bother tracking mixed attributes for pages that can't
7483 * be huge due to alignment, i.e. process only pages that are
7484 * entirely contained by the memslot.
7485 */
7486 gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
7487 gfn_t start = gfn_round_for_level(slot->base_gfn, level);
7488 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7489 gfn_t gfn;
7490
7491 if (start < slot->base_gfn)
7492 start += nr_pages;
7493
7494 /*
7495 * Unlike setting attributes, every potential hugepage needs to
7496 * be manually checked as the attributes may already be mixed.
7497 */
7498 for (gfn = start; gfn < end; gfn += nr_pages) {
7499 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7500
7501 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7502 hugepage_clear_mixed(slot, gfn, level);
7503 else
7504 hugepage_set_mixed(slot, gfn, level);
7505 }
7506 }
7507 }
7508 #endif
7509