/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>

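/*
 * A vcpu is nested-virt capable when the host has the nested-virt
 * capability and userspace enabled KVM_ARM_VCPU_HAS_EL2. This is
 * always false when compiled as part of the nVHE hypervisor object,
 * where __KVM_NVHE_HYPERVISOR__ is defined.
 */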
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

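/*
 * Build a TCR_EL1 view of the guest's TCR_EL2: T0SZ, TG0, ORGN0 and
 * IRGN0 map across unchanged, PS becomes IPS, TBI becomes TBI0, and
 * TTBR1_EL1 walks are disabled via EPD1 since non-VHE EL2 only has a
 * single translation table base register.
 */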
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

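/*
 * CPTR_EL2 uses set-to-trap bits while CPACR uses set-to-enable fields,
 * so TFP and TZ are inverted into FPEN and ZEN. TCPAC and TAM occupy
 * the same bit positions in both layouts and are copied through as-is.
 */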
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_ELx_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_ELx_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_ELx_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_ELx_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

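/*
 * Hooks for managing the pool of shadow stage-2 MMUs that back the
 * guest hypervisor's own stage-2 translations: per-VM and per-vcpu
 * initialisation, lookup of the MMU matching the current vEL2 context,
 * and switching the hardware MMU around vcpu load/put.
 */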
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

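/*
 * Result of walking the guest hypervisor's stage-2 page tables for a
 * given IPA: the output address, the mapping size and level, the R/W
 * permissions, the raw descriptor, and a pre-computed ESR to inject if
 * the walk faulted.
 */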
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

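/* Bit 54 is the stage-2 XN bit: the mapping is executable when it is clear. */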
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	return !(trans->desc & BIT(54));
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

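/*
 * Same checks for TLBIs targeting the EL2 regime, with one addition:
 * IPA-based invalidations (the IPAS2E1 group) operate on stage-2
 * rather than the EL2 stage-1 regime, so they are rejected here.
 */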
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm *kvm);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

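/*
 * The level at which a nested stage-2 mapping was created is stashed in
 * the two software bits of the shadow page-table descriptor, so that
 * the original mapping size can be recovered later.
 */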
#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

/* Adjust alignment for the contiguous bit as per StageOA() */
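/*
 * For example, with 4KiB granules a contiguous hint covers 16 entries,
 * so the effective output alignment grows by 4 bits; 16KiB granules use
 * 32 (2^5) or 128 (2^7) entries depending on the level, and 64KiB
 * granules always use 32.
 */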
#define contiguous_bit_shift(d, wi, l)					\
	({								\
		u8 shift = 0;						\
									\
		if ((d) & PTE_CONT) {					\
			switch (BIT((wi)->pgshift)) {			\
			case SZ_4K:					\
				shift = 4;				\
				break;					\
			case SZ_16K:					\
				shift = (l) == 2 ? 5 : 7;		\
				break;					\
			case SZ_64K:					\
				shift = 5;				\
				break;					\
			}						\
		}							\
									\
		shift;							\
	})

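/*
 * Decode a TCR_ELx.{I}PS field into an output address size in bits.
 * Anything at or beyond the 48-bit encoding is clamped to 48, as 52-bit
 * output addresses are not supported here.
 */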
static inline unsigned int ps_to_output_size(unsigned int ps)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5:
	default:
		return 48;
	}
}

#endif /* __ARM64_KVM_NESTED_H */