/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__

#include <linux/bsearch.h>

#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)

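/*
 * Example (illustrative only): reg_to_encoding() packs a descriptor's
 * Op0/Op1/CRn/CRm/Op2 fields back into the encoding produced by the SYS_*
 * definitions in <asm/sysreg.h>, e.g.:
 *
 *	struct sys_reg_desc d = { .Op0 = 3, .Op1 = 0, .CRn = 1,
 *				  .CRm = 0, .Op2 = 0 };
 *	u32 enc = reg_to_encoding(&d);	// == SYS_SCTLR_EL1
 */
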
struct sys_reg_params {
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;
	u64	regval;
	bool	is_write;
};

#define encoding_to_params(reg)						\
	((struct sys_reg_params){ .Op0 = sys_reg_Op0(reg),		\
				  .Op1 = sys_reg_Op1(reg),		\
				  .CRn = sys_reg_CRn(reg),		\
				  .CRm = sys_reg_CRm(reg),		\
				  .Op2 = sys_reg_Op2(reg) })

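/*
 * Example (illustrative only): encoding_to_params() is the inverse of
 * reg_to_encoding(), expanding a SYS_* encoding into a compound-literal
 * struct sys_reg_params:
 *
 *	struct sys_reg_params p = encoding_to_params(SYS_SCTLR_EL1);
 *	// p.Op0 == 3, p.Op1 == 0, p.CRn == 1, p.CRm == 0, p.Op2 == 0
 */
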
#define esr_sys64_to_params(esr)                                               \
	((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,                    \
				  .Op1 = ((esr) >> 14) & 0x7,                  \
				  .CRn = ((esr) >> 10) & 0xf,                  \
				  .CRm = ((esr) >> 1) & 0xf,                   \
				  .Op2 = ((esr) >> 17) & 0x7,                  \
				  .is_write = !((esr) & 1) })

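/*
 * Note: the shifts above follow the ESR_ELx ISS layout for trapped
 * MSR/MRS/system instructions (EC 0x18): Op0 in bits [21:20], Op2 in
 * [19:17], Op1 in [16:14], CRn in [13:10], CRm in [4:1], with bit [0]
 * as the direction flag (0 means a write to the system register, hence
 * the negation for .is_write).
 */
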
#define esr_cp1x_32_to_params(esr)						\
	((struct sys_reg_params){ .Op1 = ((esr) >> 14) & 0x7,			\
				  .CRn = ((esr) >> 10) & 0xf,			\
				  .CRm = ((esr) >> 1) & 0xf,			\
				  .Op2 = ((esr) >> 17) & 0x7,			\
				  .is_write = !((esr) & 1) })

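/*
 * Note: the AArch32 MCR/MRC trap ISS (EC 0x03/0x05) places Opc2, Opc1, CRn,
 * CRm and the direction bit at the same offsets, mapped onto Op2/Op1 here;
 * there is no Op0 field, so it stays zero-initialized.
 */
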
struct sys_reg_desc {
	/* Sysreg string for debug */
	const char *name;

	enum {
		AA32_DIRECT,
		AA32_LO,
		AA32_HI,
	} aarch32_map;

	/* MRS/MSR instruction which accesses it. */
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/*
	 * Initialization for vcpu. Return initialized value, or KVM
	 * sanitized value for ID registers.
	 */
	u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

	/* Index into sys_reg[], or 0 if we don't need to save it. */
	int reg;

	/* Value (usually reset value), or write mask for idregs */
	u64 val;

	/* Custom get/set_user functions, fallback to generic if NULL */
	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 *val);
	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 val);

	/* Return mask of REG_* runtime visibility overrides */
	unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd);
};

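/*
 * Example (illustrative only): the descriptor tables in sys_regs.c combine
 * SYS_DESC() (defined at the bottom of this file) with the remaining fields
 * positionally, along the lines of
 *
 *	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 *
 * which fills in .access, .reset, .reg and .val (handler name and reset
 * value shown for illustration).
 */
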
#define REG_HIDDEN		(1 << 0) /* hidden from userspace and guest */
#define REG_HIDDEN_USER		(1 << 1) /* hidden from userspace only */
#define REG_RAZ			(1 << 2) /* RAZ from userspace and guest */
#define REG_USER_WI		(1 << 3) /* WI from userspace only */

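/*
 * Example (illustrative only): a .visibility callback returns a mask of the
 * REG_* flags above, e.g. hiding a register when a feature is absent
 * (vcpu_has_feat_x() is a made-up predicate):
 *
 *	static unsigned int feat_x_visibility(const struct kvm_vcpu *vcpu,
 *					      const struct sys_reg_desc *rd)
 *	{
 *		return vcpu_has_feat_x(vcpu) ? 0 : REG_HIDDEN;
 *	}
 */
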
static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,
			      char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	/* Look, we even formatted it for you to paste into the table! */
	kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
		      &(struct va_format){ fmt, &va },
		      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
	va_end(va);
}

static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* GCC warns on an empty format string */
	print_sys_reg_msg(p, "%s", "");
}

static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct sys_reg_params *p)
{
	return true;
}

static inline bool read_zero(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p)
{
	p->regval = 0;
	return true;
}

/* Reset functions */
static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
	return __vcpu_sys_reg(vcpu, r->reg);
}

static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	__vcpu_sys_reg(vcpu, r->reg) = r->val;
	return __vcpu_sys_reg(vcpu, r->reg);
}

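/*
 * Example (illustrative only): .reset is invoked per table entry on vcpu
 * reset; reset_unknown() poisons the backing sys_reg[] slot while
 * reset_val() loads the descriptor's .val, e.g. (access handlers and values
 * are illustrative):
 *
 *	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
 *	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
 */
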
static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
					     const struct sys_reg_desc *r)
{
	if (likely(!r->visibility))
		return 0;

	return r->visibility(vcpu, r);
}

static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
	return sysreg_visibility(vcpu, r) & REG_HIDDEN;
}

static inline bool sysreg_hidden_user(const struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *r)
{
	if (likely(!r->visibility))
		return false;

	return r->visibility(vcpu, r) & (REG_HIDDEN | REG_HIDDEN_USER);
}

static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *r)
{
	return sysreg_visibility(vcpu, r) & REG_RAZ;
}

static inline bool sysreg_user_write_ignore(const struct kvm_vcpu *vcpu,
					    const struct sys_reg_desc *r)
{
	return sysreg_visibility(vcpu, r) & REG_USER_WI;
}

static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
			      const struct sys_reg_desc *i2)
{
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->Op0 != i2->Op0)
		return i1->Op0 - i2->Op0;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	return i1->Op2 - i2->Op2;
}

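/*
 * Note: cmp_sys_reg() defines the Op0, Op1, CRn, CRm, Op2 ordering that the
 * descriptor tables must be sorted by; the bsearch()-based find_reg() below
 * relies on it.
 */
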
static inline int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}

static inline const struct sys_reg_desc *
find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
	 unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

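/*
 * Example (illustrative sketch only): a trap handler typically decodes the
 * ESR into params and looks the descriptor up in a sorted table before
 * dispatching to its .access handler:
 *
 *	struct sys_reg_params params = esr_sys64_to_params(esr);
 *	const struct sys_reg_desc *r = find_reg(&params, table, ARRAY_SIZE(table));
 *
 *	if (r && r->access)
 *		handled = r->access(vcpu, &params, r);
 */
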
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num);

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num);
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num);

bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);

#define AA32(_x)	.aarch32_map = AA32_##_x
#define Op0(_x) 	.Op0 = _x
#define Op1(_x) 	.Op1 = _x
#define CRn(_x)		.CRn = _x
#define CRm(_x) 	.CRm = _x
#define Op2(_x) 	.Op2 = _x

#define SYS_DESC(reg)					\
	.name = #reg,					\
	Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)),	\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))

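/*
 * Example (illustrative only): SYS_DESC(SYS_SCTLR_EL1) expands to roughly
 *
 *	.name = "SYS_SCTLR_EL1", .Op0 = 3, .Op1 = 0, .CRn = 1, .CRm = 0, .Op2 = 0,
 *
 * leaving only the access/reset/reg/val fields for the table entry to supply.
 */
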
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */