/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
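
/*
 * Illustrative sketch (not part of this header's API): callers that walk the
 * four PAE page-directory roots are expected to skip invalid entries rather
 * than compare against INVALID_PAGE, roughly:
 *
 *	for (i = 0; i < 4; ++i) {
 *		if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
 *			continue;
 *		...
 *	}
 *
 * where 'mmu->pae_root' stands in for whatever storage the caller uses for
 * its PAE roots.
 */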

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equivalent large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* Holds the gfn of each SPTE inside spt. */
	gfn_t *gfns;
	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
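
/*
 * Usage sketch (illustrative, not a new API): the helpers above rely on the
 * allocation path stashing the kvm_mmu_page pointer in the page_private()
 * field of the struct page backing sp->spt, i.e. something along the lines
 * of set_page_private(virt_to_page(sp->spt), (unsigned long)sp).  Given that
 * link, any pointer into an SPTE page maps back to its metadata without a
 * lookup:
 *
 *	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *	gfn_t base_gfn = sp->gfn;
 */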

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
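
/*
 * Illustrative example: the address space id selects which memslot set a
 * shadow page belongs to (0 for normal memory, 1 for SMM), e.g. when looking
 * up the memslot backing a shadow page's gfn.  __kvm_memslots() and
 * __gfn_to_memslot() are the generic KVM helpers; 'kvm' and 'sp' stand in
 * for the caller's context:
 *
 *	struct kvm_memslots *slots = __kvm_memslots(kvm, kvm_mmu_page_as_id(sp));
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, sp->gfn);
 */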

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}
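
/*
 * Rough sketch of the expected caller pattern when building a new
 * shadow-present SPTE (the SPTE_AD_DISABLED_MASK / SPTE_AD_WRPROT_ONLY_MASK
 * names are assumed from spte.h and shown only for illustration):
 *
 *	if (sp->role.ad_disabled)
 *		spte |= SPTE_AD_DISABLED_MASK;
 *	else if (kvm_vcpu_ad_need_write_protect(vcpu))
 *		spte |= SPTE_AD_WRPROT_ONLY_MASK;
 *
 * Dirty pages are then recorded via write-protection faults instead of
 * hardware Dirty-bit tracking/PML.
 */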

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
 * RET_PF_INVALID: the SPTE is invalid, let the real page fault path update it.
 * RET_PF_FIXED: the faulting entry has been fixed.
 * RET_PF_SPURIOUS: the faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
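
/*
 * Illustrative dispatch sketch (hedged; the real plumbing lives in mmu.c and
 * the angle-bracketed steps are placeholders, not function names):
 *
 *	r = fast_page_fault(vcpu, gpa, error_code);
 *	if (r == RET_PF_INVALID)
 *		r = <take the full page fault path>;
 *	if (r == RET_PF_EMULATE)
 *		<hand the faulting instruction to the emulator>;
 *	else
 *		<resume the guest; RETRY, FIXED and SPURIOUS all re-enter>;
 */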

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)
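
/*
 * The SET_SPTE_* values are OR'd into set_spte()'s return value, so callers
 * test them as a mask.  Illustrative sketch only (the real handling lives in
 * mmu.c; the angle-bracketed steps are placeholders):
 *
 *	ret = set_spte(...);
 *	if (ret & SET_SPTE_WRITE_PROTECTED_PT)
 *		<the gfn hosts a shadowed page table; emulate on write fault>
 *	if (ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
 *	if (ret & SET_SPTE_SPURIOUS)
 *		<the desired SPTE was already in place; report RET_PF_SPURIOUS>
 */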

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */