xref: /linux/arch/x86/kvm/kvm_cache_regs.h (revision 9a6b55ac)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

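/*
 * CR0/CR4 bits that may be owned by the guest, i.e. that the guest may be
 * allowed to read/write without a VM-exit.  Only bits in these masks can
 * make the cached CR0/CR4 value stale; see kvm_read_cr0_bits() and
 * kvm_read_cr4_bits() below.
 */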
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

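/*
 * Generates kvm_rax_read()/kvm_rax_write() style helpers for each GPR.
 * These access vcpu->arch.regs[] directly and do not touch the
 * availability/dirty tracking used by kvm_register_read() below.
 */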
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

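/*
 * regs_avail is a bitmap of registers whose cached value (vcpu->arch.regs[]
 * or the corresponding VCPU_EXREG_* cache) is current; regs_dirty is a
 * bitmap of registers whose cached value has been modified by KVM and still
 * needs to be propagated back to hardware state (e.g. the VMCS) by vendor
 * code before the next VM-entry.
 */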
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

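/*
 * Read/write a GPR through the register cache.  A read demand-fills the
 * cache via the vendor ->cache_reg() hook if the value is not yet
 * available; a write marks the register dirty so that it is written back
 * before the next VM-entry.
 */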
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
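
/*
 * Illustrative example (not part of this header): emulation code that
 * swaps two cached registers could be written as
 *
 *	unsigned long rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
 *
 *	kvm_register_write(vcpu, VCPU_REGS_RCX,
 *			   kvm_register_read(vcpu, VCPU_REGS_RDX));
 *	kvm_register_write(vcpu, VCPU_REGS_RDX, rcx);
 */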

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

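/*
 * Read a cached PDPTE (PAE paging).  The vendor ->cache_reg() hook may need
 * to (re)load the PDPTEs, which on SVM can involve reading guest memory and
 * therefore sleeping, hence the might_sleep() annotation.
 */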
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

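/*
 * CR0/CR4 reads are served from the cached values.  If the requested mask
 * touches bits that are currently guest-owned, the live value of those bits
 * must first be pulled out of hardware via the vendor decache hook, e.g.
 * kvm_read_cr0_bits(vcpu, X86_CR0_TS) to test CR0.TS.
 */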
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

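/*
 * Combine EDX:EAX into a single 64-bit value, as used when emulating
 * instructions such as WRMSR that pass a 64-bit operand in the EDX:EAX
 * register pair.
 */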
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

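/*
 * HF_GUEST_MASK tracks whether the vCPU is running a nested guest (L2).
 * An EOI-exitmap update that becomes pending while in guest mode is
 * deferred and turned into a KVM_REQ_LOAD_EOI_EXITMAP request when L2 is
 * left.
 */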
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif