/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"

#include "exec/gdbstub.h"

#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

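/*
 * ID_AA64DFR0_EL1.BRPs/WRPs encode the number of implemented breakpoints/
 * watchpoints minus one, hence the "+ 1" above: a raw field value of 5
 * means six hardware breakpoints.
 */
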
void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
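/*
 * The field placement above intentionally mirrors the ESR_EL2 ISS layout
 * for a trapped MSR/MRS access (op0 at [21:20], op2 at [19:17], op1 at
 * [16:14], CRn at [13:10], CRm at [4:1]), so a trapped sysreg number can be
 * masked straight out of the syndrome. For example, SYSREG(2, 0, 1, 0, 4)
 * below (OSLAR_EL1) is (2 << 20) | (4 << 17) | (1 << 10) = 0x280400.
 */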
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

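/*
 * While the VM is stopped, the guest's virtual timer must not advance; the
 * value below holds the counter snapshot across pause/migration so that the
 * difference can be re-applied as the vtimer offset on resume (see the
 * hv_vcpu_set_vtimer_offset() call in hvf_put_registers()).
 */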
typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

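/*
 * cp_idx is filled in by hvf_arch_init_vcpu(): it caches each register's
 * index into the cpreg_values array, or -1 for registers that QEMU's cpreg
 * list does not know about, which are then skipped during sync.
 */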
static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->accel->dirty) {
        hvf_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}

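/*
 * In the trapped-access encoding, register number 31 is XZR: reads return
 * zero and writes are discarded. Only x0..x30 map to real HVF registers,
 * hence the "rt < 31" guards below.
 */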
static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SCTLR.SPAN is an inverted enable: setting the bit to 1 stops the CPU
     * from automatically setting PSTATE.PAN on exception entry to EL1.
     * Disable the feature at reset for compatibility with PAN-unaware
     * guests; PAN-aware guest software can re-enable it by writing 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
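    /*
     * ID_AA64PFR0_EL1.EL1 ([7:4]) and .EL0 ([3:0]) must both read 0b0001,
     * i.e. AArch64-only; any other value would mean the host can also run
     * AArch32 at those exception levels, which this backend cannot expose.
     */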
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag it for reporting
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps; override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

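/*
 * A kick must both interrupt the vCPU thread and force HVF out of
 * hv_vcpu_run(); hv_vcpus_exit() takes care of the latter.
 */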
void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

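/*
 * Rather than relying on a hypervisor injection facility, exceptions are
 * delivered by emulating the architectural exception entry to EL1 in QEMU
 * via arm_cpu_do_interrupt() and resuming the vCPU at the vector.
 */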
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * The guest passes the SMCCC function ID in x0 and up to three arguments in
 * x1..x3; the return value is written back to x0 before the vCPU resumes.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported; we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

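/*
 * op0 == 3, op1 == 0, CRn == 0, CRm == 1..7 is the space the architecture
 * reserves for ID registers, which read-as-zero when unimplemented; see the
 * RES0 fallback in hvf_sysreg_read().
 */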
static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

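/* Translate the ISS-style sysreg encoding into QEMU's internal cpreg key. */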
static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}

static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t val = 0;

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        break;
    case SYSREG_PMCR_EL0:
        val = env->cp15.c9_pmcr;
        break;
    case SYSREG_PMCCNTR_EL0:
        pmu_op_start(env);
        val = env->cp15.c15_ccnt;
        pmu_op_finish(env);
        break;
    case SYSREG_PMCNTENCLR_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMOVSCLR_EL0:
        val = env->cp15.c9_pmovsr;
        break;
    case SYSREG_PMSELR_EL0:
        val = env->cp15.c9_pmselr;
        break;
    case SYSREG_PMINTENCLR_EL1:
        val = env->cp15.c9_pminten;
        break;
    case SYSREG_PMCCFILTR_EL0:
        val = env->cp15.pmccfiltr_el0;
        break;
    case SYSREG_PMCNTENSET_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMUSERENR_EL0:
        val = env->cp15.c9_pmuserenr;
        break;
    case SYSREG_PMCEID0_EL0:
    case SYSREG_PMCEID1_EL0:
        /* We can't really count anything yet; declare all events invalid */
        val = 0;
        break;
    case SYSREG_OSLSR_EL1:
        val = env->cp15.oslsr_el1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (!hvf_sysreg_read_cp(cpu, reg, &val)) {
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        break;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        break;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            val = 0;
            break;
        }
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                        SYSREG_OP0(reg),
                                        SYSREG_OP1(reg),
                                        SYSREG_CRN(reg),
                                        SYSREG_CRM(reg),
                                        SYSREG_OP2(reg));
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    trace_hvf_sysreg_read(reg,
                          SYSREG_OP0(reg),
                          SYSREG_OP1(reg),
                          SYSREG_CRN(reg),
                          SYSREG_CRM(reg),
                          SYSREG_OP2(reg),
                          val);
    hvf_set_reg(cpu, rt, val);

    return 0;
}

1384 static void pmu_update_irq(CPUARMState *env)
1385 {
1386     ARMCPU *cpu = env_archcpu(env);
1387     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1388             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1389 }
1390 
1391 static bool pmu_event_supported(uint16_t number)
1392 {
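    /* No PMU events are implemented under hvf; only PMCCNTR can count. */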
1393     return false;
1394 }
1395 
1396 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1397  * the current EL, security state, and register configuration.
1398  */
1399 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1400 {
1401     uint64_t filter;
1402     bool enabled, filtered = true;
1403     int el = arm_current_el(env);
1404 
1405     enabled = (env->cp15.c9_pmcr & PMCRE) &&
1406               (env->cp15.c9_pmcnten & (1 << counter));
1407 
1408     if (counter == 31) {
1409         filter = env->cp15.pmccfiltr_el0;
1410     } else {
1411         filter = env->cp15.c14_pmevtyper[counter];
1412     }
1413 
1414     if (el == 0) {
1415         filtered = filter & PMXEVTYPER_U;
1416     } else if (el == 1) {
1417         filtered = filter & PMXEVTYPER_P;
1418     }
1419 
1420     if (counter != 31) {
1421         /*
1422          * If not checking PMCCNTR, ensure the counter is setup to an event we
1423          * support
1424          */
1425         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1426         if (!pmu_event_supported(event)) {
1427             return false;
1428         }
1429     }
1430 
1431     return enabled && !filtered;
1432 }
1433 
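/*
 * PMSWINC_EL0: software-increment each selected, enabled SW_INCR event
 * counter and raise the overflow IRQ when a 32-bit counter wraps.
 */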
1434 static void pmswinc_write(CPUARMState *env, uint64_t value)
1435 {
1436     unsigned int i;
1437     for (i = 0; i < pmu_num_counters(env); i++) {
1438         /* Increment a counter's count iff: */
1439         if ((value & (1 << i)) && /* counter's bit is set */
1440                 /* counter is enabled and not filtered */
1441                 pmu_counter_enabled(env, i) &&
1442                 /* counter is SW_INCR */
1443                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1444             /*
1445              * Detect if this write causes an overflow since we can't predict
1446              * PMSWINC overflows like we can for other events
1447              */
1448             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1449 
1450             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1451                 env->cp15.c9_pmovsr |= (1 << i);
1452                 pmu_update_irq(env);
1453             }
1454 
1455             env->cp15.c14_pmevcntr[i] = new_pmswinc;
1456         }
1457     }
1458 }
1459 
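/*
 * Forward a trapped sysreg write to the TCG cpreg handlers. Returns false
 * if the register is unknown or its access check fails; the caller then
 * injects an UNDEF into the guest.
 */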
1460 static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
1461 {
1462     ARMCPU *arm_cpu = ARM_CPU(cpu);
1463     CPUARMState *env = &arm_cpu->env;
1464     const ARMCPRegInfo *ri;
1465 
1466     ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
1467 
1468     if (ri) {
1469         if (ri->accessfn) {
1470             if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
1471                 return false;
1472             }
1473         }
1474         if (ri->writefn) {
1475             ri->writefn(env, ri, val);
1476         } else {
1477             CPREG_FIELD64(env, ri) = val;
1478         }
1479 
1480         trace_hvf_vgic_write(ri->name, val);
1481         return true;
1482     }
1483 
1484     return false;
1485 }
1486 
1487 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
1488 {
1489     ARMCPU *arm_cpu = ARM_CPU(cpu);
1490     CPUARMState *env = &arm_cpu->env;
1491 
1492     trace_hvf_sysreg_write(reg,
1493                            SYSREG_OP0(reg),
1494                            SYSREG_OP1(reg),
1495                            SYSREG_CRN(reg),
1496                            SYSREG_CRM(reg),
1497                            SYSREG_OP2(reg),
1498                            val);
1499 
1500     switch (reg) {
1501     case SYSREG_PMCCNTR_EL0:
1502         pmu_op_start(env);
1503         env->cp15.c15_ccnt = val;
1504         pmu_op_finish(env);
1505         break;
1506     case SYSREG_PMCR_EL0:
1507         pmu_op_start(env);
1508 
1509         if (val & PMCRC) {
1510             /* The counter has been reset */
1511             env->cp15.c15_ccnt = 0;
1512         }
1513 
1514         if (val & PMCRP) {
1515             unsigned int i;
1516             for (i = 0; i < pmu_num_counters(env); i++) {
1517                 env->cp15.c14_pmevcntr[i] = 0;
1518             }
1519         }
1520 
1521         env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1522         env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
1523 
1524         pmu_op_finish(env);
1525         break;
1526     case SYSREG_PMUSERENR_EL0:
1527         env->cp15.c9_pmuserenr = val & 0xf;
1528         break;
1529     case SYSREG_PMCNTENSET_EL0:
1530         env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
1531         break;
1532     case SYSREG_PMCNTENCLR_EL0:
1533         env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
1534         break;
1535     case SYSREG_PMINTENCLR_EL1:
1536         pmu_op_start(env);
1537         env->cp15.c9_pminten &= ~val; /* writing 1 clears the enable bit */
1538         pmu_op_finish(env);
1539         break;
1540     case SYSREG_PMOVSCLR_EL0:
1541         pmu_op_start(env);
1542         env->cp15.c9_pmovsr &= ~val;
1543         pmu_op_finish(env);
1544         break;
1545     case SYSREG_PMSWINC_EL0:
1546         pmu_op_start(env);
1547         pmswinc_write(env, val);
1548         pmu_op_finish(env);
1549         break;
1550     case SYSREG_PMSELR_EL0:
1551         env->cp15.c9_pmselr = val & 0x1f;
1552         break;
1553     case SYSREG_PMCCFILTR_EL0:
1554         pmu_op_start(env);
1555         env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
1556         pmu_op_finish(env);
1557         break;
1558     case SYSREG_OSLAR_EL1:
1559         env->cp15.oslsr_el1 = val & 1;
1560         break;
1561     case SYSREG_OSDLR_EL1:
1562         /* Dummy register */
1563         break;
1564     case SYSREG_ICC_AP0R0_EL1:
1565     case SYSREG_ICC_AP0R1_EL1:
1566     case SYSREG_ICC_AP0R2_EL1:
1567     case SYSREG_ICC_AP0R3_EL1:
1568     case SYSREG_ICC_AP1R0_EL1:
1569     case SYSREG_ICC_AP1R1_EL1:
1570     case SYSREG_ICC_AP1R2_EL1:
1571     case SYSREG_ICC_AP1R3_EL1:
1572     case SYSREG_ICC_ASGI1R_EL1:
1573     case SYSREG_ICC_BPR0_EL1:
1574     case SYSREG_ICC_BPR1_EL1:
1575     case SYSREG_ICC_CTLR_EL1:
1576     case SYSREG_ICC_DIR_EL1:
1577     case SYSREG_ICC_EOIR0_EL1:
1578     case SYSREG_ICC_EOIR1_EL1:
1579     case SYSREG_ICC_HPPIR0_EL1:
1580     case SYSREG_ICC_HPPIR1_EL1:
1581     case SYSREG_ICC_IAR0_EL1:
1582     case SYSREG_ICC_IAR1_EL1:
1583     case SYSREG_ICC_IGRPEN0_EL1:
1584     case SYSREG_ICC_IGRPEN1_EL1:
1585     case SYSREG_ICC_PMR_EL1:
1586     case SYSREG_ICC_SGI0R_EL1:
1587     case SYSREG_ICC_SGI1R_EL1:
1588     case SYSREG_ICC_SRE_EL1:
1589         /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
1590         if (!hvf_sysreg_write_cp(cpu, reg, val)) {
1591             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1592         }
1593         break;
1594     case SYSREG_MDSCR_EL1:
1595         env->cp15.mdscr_el1 = val;
1596         break;
1597     case SYSREG_DBGBVR0_EL1:
1598     case SYSREG_DBGBVR1_EL1:
1599     case SYSREG_DBGBVR2_EL1:
1600     case SYSREG_DBGBVR3_EL1:
1601     case SYSREG_DBGBVR4_EL1:
1602     case SYSREG_DBGBVR5_EL1:
1603     case SYSREG_DBGBVR6_EL1:
1604     case SYSREG_DBGBVR7_EL1:
1605     case SYSREG_DBGBVR8_EL1:
1606     case SYSREG_DBGBVR9_EL1:
1607     case SYSREG_DBGBVR10_EL1:
1608     case SYSREG_DBGBVR11_EL1:
1609     case SYSREG_DBGBVR12_EL1:
1610     case SYSREG_DBGBVR13_EL1:
1611     case SYSREG_DBGBVR14_EL1:
1612     case SYSREG_DBGBVR15_EL1:
1613         env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
1614         break;
1615     case SYSREG_DBGBCR0_EL1:
1616     case SYSREG_DBGBCR1_EL1:
1617     case SYSREG_DBGBCR2_EL1:
1618     case SYSREG_DBGBCR3_EL1:
1619     case SYSREG_DBGBCR4_EL1:
1620     case SYSREG_DBGBCR5_EL1:
1621     case SYSREG_DBGBCR6_EL1:
1622     case SYSREG_DBGBCR7_EL1:
1623     case SYSREG_DBGBCR8_EL1:
1624     case SYSREG_DBGBCR9_EL1:
1625     case SYSREG_DBGBCR10_EL1:
1626     case SYSREG_DBGBCR11_EL1:
1627     case SYSREG_DBGBCR12_EL1:
1628     case SYSREG_DBGBCR13_EL1:
1629     case SYSREG_DBGBCR14_EL1:
1630     case SYSREG_DBGBCR15_EL1:
1631         env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
1632         break;
1633     case SYSREG_DBGWVR0_EL1:
1634     case SYSREG_DBGWVR1_EL1:
1635     case SYSREG_DBGWVR2_EL1:
1636     case SYSREG_DBGWVR3_EL1:
1637     case SYSREG_DBGWVR4_EL1:
1638     case SYSREG_DBGWVR5_EL1:
1639     case SYSREG_DBGWVR6_EL1:
1640     case SYSREG_DBGWVR7_EL1:
1641     case SYSREG_DBGWVR8_EL1:
1642     case SYSREG_DBGWVR9_EL1:
1643     case SYSREG_DBGWVR10_EL1:
1644     case SYSREG_DBGWVR11_EL1:
1645     case SYSREG_DBGWVR12_EL1:
1646     case SYSREG_DBGWVR13_EL1:
1647     case SYSREG_DBGWVR14_EL1:
1648     case SYSREG_DBGWVR15_EL1:
1649         env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
1650         break;
1651     case SYSREG_DBGWCR0_EL1:
1652     case SYSREG_DBGWCR1_EL1:
1653     case SYSREG_DBGWCR2_EL1:
1654     case SYSREG_DBGWCR3_EL1:
1655     case SYSREG_DBGWCR4_EL1:
1656     case SYSREG_DBGWCR5_EL1:
1657     case SYSREG_DBGWCR6_EL1:
1658     case SYSREG_DBGWCR7_EL1:
1659     case SYSREG_DBGWCR8_EL1:
1660     case SYSREG_DBGWCR9_EL1:
1661     case SYSREG_DBGWCR10_EL1:
1662     case SYSREG_DBGWCR11_EL1:
1663     case SYSREG_DBGWCR12_EL1:
1664     case SYSREG_DBGWCR13_EL1:
1665     case SYSREG_DBGWCR14_EL1:
1666     case SYSREG_DBGWCR15_EL1:
1667         env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
1668         break;
1669     default:
1670         cpu_synchronize_state(cpu);
1671         trace_hvf_unhandled_sysreg_write(env->pc, reg,
1672                                          SYSREG_OP0(reg),
1673                                          SYSREG_OP1(reg),
1674                                          SYSREG_CRN(reg),
1675                                          SYSREG_CRM(reg),
1676                                          SYSREG_OP2(reg));
1677         hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1678         return 1;
1679     }
1680 
1681     return 0;
1682 }
1683 
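/*
 * Forward QEMU's pending IRQ/FIQ lines to hvf so they are asserted in the
 * guest on the next hv_vcpu_run().
 */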
1684 static int hvf_inject_interrupts(CPUState *cpu)
1685 {
1686     if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
1687         trace_hvf_inject_fiq();
1688         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
1689                                       true);
1690     }
1691 
1692     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
1693         trace_hvf_inject_irq();
1694         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
1695                                       true);
1696     }
1697 
1698     return 0;
1699 }
1700 
1701 static uint64_t hvf_vtimer_val_raw(void)
1702 {
1703     /*
1704      * mach_absolute_time() returns the host counter value without the VM
1705      * offset that we define. Subtract our offset to get the guest's view.
1706      */
1707     return mach_absolute_time() - hvf_state->vtimer_offset;
1708 }
1709 
1710 static uint64_t hvf_vtimer_val(void)
1711 {
1712     if (!runstate_is_running()) {
1713         /* VM is paused, the vtimer value is in vtimer.vtimer_val */
1714         return vtimer.vtimer_val;
1715     }
1716 
1717     return hvf_vtimer_val_raw();
1718 }
1719 
1720 static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
1721 {
1722     /*
1723      * Use pselect to sleep so that other threads can IPI us while we're
1724      * sleeping.
1725      */
1726     qatomic_set_mb(&cpu->thread_kicked, false);
1727     bql_unlock();
1728     pselect(0, NULL, NULL, NULL, ts, &cpu->accel->unblock_ipi_mask);
1729     bql_lock();
1730 }
1731 
1732 static void hvf_wfi(CPUState *cpu)
1733 {
1734     ARMCPU *arm_cpu = ARM_CPU(cpu);
1735     struct timespec ts;
1736     hv_return_t r;
1737     uint64_t ctl;
1738     uint64_t cval;
1739     int64_t ticks_to_sleep;
1740     uint64_t seconds;
1741     uint64_t nanos;
1742     uint32_t cntfrq;
1743 
1744     if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
1745         /* Interrupt pending, no need to wait */
1746         return;
1747     }
1748 
1749     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1750     assert_hvf_ok(r);
1751 
1752     if (!(ctl & TMR_CTL_ENABLE) || (ctl & TMR_CTL_IMASK)) {
1753         /* Timer disabled or masked, just wait for an IPI. */
1754         hvf_wait_for_ipi(cpu, NULL);
1755         return;
1756     }
1757 
1758     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
1759     assert_hvf_ok(r);
1760 
1761     ticks_to_sleep = cval - hvf_vtimer_val();
1762     if (ticks_to_sleep < 0) {
1763         return;
1764     }
1765 
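    /*
     * gt_cntfrq_period_ns() returns the tick period in nanoseconds (about
     * 41 ns at the typical Apple 24 MHz counter), so ticks * period is the
     * time to sleep; split it into whole seconds and a sub-second
     * nanosecond remainder for the timespec below.
     */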
1766     cntfrq = gt_cntfrq_period_ns(arm_cpu);
1767     seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
1768     ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
1769     nanos = ticks_to_sleep * cntfrq;
1770 
1771     /*
1772      * Don't sleep for less than the time a context switch would take,
1773      * so that we can satisfy fast timer requests on the same CPU.
1774      * Measurements on M1 show the sweet spot to be ~2ms.
1775      */
1776     if (!seconds && nanos < (2 * SCALE_MS)) {
1777         return;
1778     }
1779 
1780     ts = (struct timespec) { seconds, nanos };
1781     hvf_wait_for_ipi(cpu, &ts);
1782 }
1783 
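/*
 * While hvf has the vtimer masked after a VTIMER_ACTIVATED exit, QEMU owns
 * the IRQ line: mirror CNTV_CTL_EL0 onto it and unmask the vtimer again
 * once it stops asserting.
 */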
1784 static void hvf_sync_vtimer(CPUState *cpu)
1785 {
1786     ARMCPU *arm_cpu = ARM_CPU(cpu);
1787     hv_return_t r;
1788     uint64_t ctl;
1789     bool irq_state;
1790 
1791     if (!cpu->accel->vtimer_masked) {
1792         /* We will get notified on vtimer changes by hvf, nothing to do */
1793         return;
1794     }
1795 
1796     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1797     assert_hvf_ok(r);
1798 
1799     irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
1800                 (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
1801     qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
1802 
1803     if (!irq_state) {
1804         /* Timer no longer asserting, we can unmask it */
1805         hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
1806         cpu->accel->vtimer_masked = false;
1807     }
1808 }
1809 
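/*
 * Run the vCPU until it exits, handle the exit, and return an EXCP_* value
 * to QEMU's vCPU loop (0 means re-enter the guest right away).
 */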
1810 int hvf_vcpu_exec(CPUState *cpu)
1811 {
1812     ARMCPU *arm_cpu = ARM_CPU(cpu);
1813     CPUARMState *env = &arm_cpu->env;
1814     int ret;
1815     hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
1816     hv_return_t r;
1817     bool advance_pc = false;
1818 
1819     if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
1820         hvf_inject_interrupts(cpu)) {
1821         return EXCP_INTERRUPT;
1822     }
1823 
1824     if (cpu->halted) {
1825         return EXCP_HLT;
1826     }
1827 
1828     flush_cpu_state(cpu);
1829 
1830     bql_unlock();
1831     assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
1832 
1833     /* handle VMEXIT */
1834     uint64_t exit_reason = hvf_exit->reason;
1835     uint64_t syndrome = hvf_exit->exception.syndrome;
1836     uint32_t ec = syn_get_ec(syndrome);
1837 
1838     ret = 0;
1839     bql_lock();
1840     switch (exit_reason) {
1841     case HV_EXIT_REASON_EXCEPTION:
1842         /* This is the main one, handle below. */
1843         break;
1844     case HV_EXIT_REASON_VTIMER_ACTIVATED:
1845         qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
1846         cpu->accel->vtimer_masked = true;
1847         return 0;
1848     case HV_EXIT_REASON_CANCELED:
1849         /* we got kicked, no exit to process */
1850         return 0;
1851     default:
1852         g_assert_not_reached();
1853     }
1854 
1855     hvf_sync_vtimer(cpu);
1856 
1857     switch (ec) {
1858     case EC_SOFTWARESTEP: {
1859         ret = EXCP_DEBUG;
1860 
1861         if (!cpu->singlestep_enabled) {
1862             error_report("EC_SOFTWARESTEP but single-stepping not enabled");
1863         }
1864         break;
1865     }
1866     case EC_AA64_BKPT: {
1867         ret = EXCP_DEBUG;
1868 
1869         cpu_synchronize_state(cpu);
1870 
1871         if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
1872             /* Re-inject into the guest */
1873             ret = 0;
1874             hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
1875         }
1876         break;
1877     }
1878     case EC_BREAKPOINT: {
1879         ret = EXCP_DEBUG;
1880 
1881         cpu_synchronize_state(cpu);
1882 
1883         if (!find_hw_breakpoint(cpu, env->pc)) {
1884             error_report("EC_BREAKPOINT but unknown hw breakpoint");
1885         }
1886         break;
1887     }
1888     case EC_WATCHPOINT: {
1889         ret = EXCP_DEBUG;
1890 
1891         cpu_synchronize_state(cpu);
1892 
1893         CPUWatchpoint *wp =
1894             find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
1895         if (!wp) {
1896             error_report("EC_WATCHPOINT but unknown hw watchpoint");
1897         }
1898         cpu->watchpoint_hit = wp;
1899         break;
1900     }
1901     case EC_DATAABORT: {
1902         bool isv = syndrome & ARM_EL_ISV;
1903         bool iswrite = (syndrome >> 6) & 1;     /* ISS.WnR */
1904         bool s1ptw = (syndrome >> 7) & 1;       /* ISS.S1PTW */
1905         uint32_t sas = (syndrome >> 22) & 3;    /* ISS.SAS: access size */
1906         uint32_t len = 1 << sas;
1907         uint32_t srt = (syndrome >> 16) & 0x1f; /* ISS.SRT: transfer reg */
1908         uint32_t cm = (syndrome >> 8) & 0x1;    /* ISS.CM: cache maint. */
1909         uint64_t val = 0;
1910 
1911         trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
1912                              hvf_exit->exception.physical_address, isv,
1913                              iswrite, s1ptw, len, srt);
1914 
1915         if (cm) {
1916             /* We don't cache MMIO regions */
1917             advance_pc = true;
1918             break;
1919         }
1920 
1921         assert(isv);
1922 
1923         if (iswrite) {
1924             val = hvf_get_reg(cpu, srt);
1925             address_space_write(&address_space_memory,
1926                                 hvf_exit->exception.physical_address,
1927                                 MEMTXATTRS_UNSPECIFIED, &val, len);
1928         } else {
1929             address_space_read(&address_space_memory,
1930                                hvf_exit->exception.physical_address,
1931                                MEMTXATTRS_UNSPECIFIED, &val, len);
1932             hvf_set_reg(cpu, srt, val);
1933         }
1934 
1935         advance_pc = true;
1936         break;
1937     }
1938     case EC_SYSTEMREGISTERTRAP: {
1939         bool isread = (syndrome >> 0) & 1;    /* ISS.Direction: 1 = MRS */
1940         uint32_t rt = (syndrome >> 5) & 0x1f; /* ISS.Rt: transfer reg */
1941         uint32_t reg = syndrome & SYSREG_MASK;
1942         uint64_t val;
1943         int sysreg_ret = 0;
1944 
1945         if (isread) {
1946             sysreg_ret = hvf_sysreg_read(cpu, reg, rt);
1947         } else {
1948             val = hvf_get_reg(cpu, rt);
1949             sysreg_ret = hvf_sysreg_write(cpu, reg, val);
1950         }
1951 
1952         advance_pc = !sysreg_ret;
1953         break;
1954     }
1955     case EC_WFX_TRAP:
1956         advance_pc = true;
1957         if (!(syndrome & WFX_IS_WFE)) {
1958             hvf_wfi(cpu);
1959         }
1960         break;
1961     case EC_AA64_HVC:
1962         cpu_synchronize_state(cpu);
1963         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
1964             if (!hvf_handle_psci_call(cpu)) {
1965                 trace_hvf_unknown_hvc(env->xregs[0]);
1966                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
1967                 env->xregs[0] = -1;
1968             }
1969         } else {
1970             trace_hvf_unknown_hvc(env->xregs[0]);
1971             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1972         }
1973         break;
1974     case EC_AA64_SMC:
1975         cpu_synchronize_state(cpu);
1976         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
1977             advance_pc = true;
1978 
1979             if (!hvf_handle_psci_call(cpu)) {
1980                 trace_hvf_unknown_smc(env->xregs[0]);
1981                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
1982                 env->xregs[0] = -1;
1983             }
1984         } else {
1985             trace_hvf_unknown_smc(env->xregs[0]);
1986             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1987         }
1988         break;
1989     default:
1990         cpu_synchronize_state(cpu);
1991         trace_hvf_exit(syndrome, ec, env->pc);
1992         error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
1993     }
1994 
1995     if (advance_pc) {
1996         uint64_t pc;
1997 
1998         flush_cpu_state(cpu);
1999 
2000         r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
2001         assert_hvf_ok(r);
2002         pc += 4;
2003         r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
2004         assert_hvf_ok(r);
2005 
2006         /* Handle single-stepping over instructions which trigger a VM exit */
2007         if (cpu->singlestep_enabled) {
2008             ret = EXCP_DEBUG;
2009         }
2010     }
2011 
2012     return ret;
2013 }
2014 
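/* Migrates the paused vtimer value saved by hvf_vm_state_change(). */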
2015 static const VMStateDescription vmstate_hvf_vtimer = {
2016     .name = "hvf-vtimer",
2017     .version_id = 1,
2018     .minimum_version_id = 1,
2019     .fields = (const VMStateField[]) {
2020         VMSTATE_UINT64(vtimer_val, HVFVTimer),
2021         VMSTATE_END_OF_LIST()
2022     },
2023 };
2024 
2025 static void hvf_vm_state_change(void *opaque, bool running, RunState state)
2026 {
2027     HVFVTimer *s = opaque;
2028 
2029     if (running) {
2030         /* Update vtimer offset on all CPUs */
2031         hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
2032         cpu_synchronize_all_states();
2033     } else {
2034         /* Remember vtimer value on every pause */
2035         s->vtimer_val = hvf_vtimer_val_raw();
2036     }
2037 }
2038 
2039 int hvf_arch_init(void)
2040 {
2041     hvf_state->vtimer_offset = mach_absolute_time();
2042     vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
2043     qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
2044 
2045     hvf_arm_init_debug();
2046 
2047     return 0;
2048 }
2049 
2050 static const uint32_t brk_insn = 0xd4200000; /* BRK #0 */
2051 
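/* Save the original instruction at bp->pc and patch a BRK over it. */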
2052 int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2053 {
2054     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
2055         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
2056         return -EINVAL;
2057     }
2058     return 0;
2059 }
2060 
2061 int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2062 {
2063     static uint32_t brk;
2064 
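    /* Only restore the saved instruction if our BRK is still in place. */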
2065     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
2066         brk != brk_insn ||
2067         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2068         return -EINVAL;
2069     }
2070     return 0;
2071 }
2072 
2073 int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
2074 {
2075     switch (type) {
2076     case GDB_BREAKPOINT_HW:
2077         return insert_hw_breakpoint(addr);
2078     case GDB_WATCHPOINT_READ:
2079     case GDB_WATCHPOINT_WRITE:
2080     case GDB_WATCHPOINT_ACCESS:
2081         return insert_hw_watchpoint(addr, len, type);
2082     default:
2083         return -ENOSYS;
2084     }
2085 }
2086 
2087 int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
2088 {
2089     switch (type) {
2090     case GDB_BREAKPOINT_HW:
2091         return delete_hw_breakpoint(addr);
2092     case GDB_WATCHPOINT_READ:
2093     case GDB_WATCHPOINT_WRITE:
2094     case GDB_WATCHPOINT_ACCESS:
2095         return delete_hw_watchpoint(addr, len, type);
2096     default:
2097         return -ENOSYS;
2098     }
2099 }
2100 
2101 void hvf_arch_remove_all_hw_breakpoints(void)
2102 {
2103     if (cur_hw_wps > 0) {
2104         g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
2105     }
2106     if (cur_hw_bps > 0) {
2107         g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
2108     }
2109 }
2110 
2111 /*
2112  * Update the vCPU with the gdbstub's view of debug registers. This view
2113  * consists of all hardware breakpoints and watchpoints inserted so far while
2114  * debugging the guest.
2115  */
2116 static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
2117 {
2118     hv_return_t r = HV_SUCCESS;
2119     int i;
2120 
2121     for (i = 0; i < cur_hw_bps; i++) {
2122         HWBreakpoint *bp = get_hw_bp(i);
2123         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
2124         assert_hvf_ok(r);
2125         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
2126         assert_hvf_ok(r);
2127     }
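    /* Zero out the remaining, unused breakpoint slots. */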
2128     for (i = cur_hw_bps; i < max_hw_bps; i++) {
2129         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
2130         assert_hvf_ok(r);
2131         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
2132         assert_hvf_ok(r);
2133     }
2134 
2135     for (i = 0; i < cur_hw_wps; i++) {
2136         HWWatchpoint *wp = get_hw_wp(i);
2137         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
2138         assert_hvf_ok(r);
2139         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
2140         assert_hvf_ok(r);
2141     }
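    /* Zero out the remaining, unused watchpoint slots. */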
2142     for (i = cur_hw_wps; i < max_hw_wps; i++) {
2143         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
2144         assert_hvf_ok(r);
2145         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
2146         assert_hvf_ok(r);
2147     }
2148 }
2149 
2150 /*
2151  * Update the vCPU with the guest's view of debug registers. This view is kept
2152  * in the environment at all times.
2153  */
2154 static void hvf_put_guest_debug_registers(CPUState *cpu)
2155 {
2156     ARMCPU *arm_cpu = ARM_CPU(cpu);
2157     CPUARMState *env = &arm_cpu->env;
2158     hv_return_t r = HV_SUCCESS;
2159     int i;
2160 
2161     for (i = 0; i < max_hw_bps; i++) {
2162         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
2163                                 env->cp15.dbgbcr[i]);
2164         assert_hvf_ok(r);
2165         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
2166                                 env->cp15.dbgbvr[i]);
2167         assert_hvf_ok(r);
2168     }
2169 
2170     for (i = 0; i < max_hw_wps; i++) {
2171         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
2172                                 env->cp15.dbgwcr[i]);
2173         assert_hvf_ok(r);
2174         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
2175                                 env->cp15.dbgwvr[i]);
2176         assert_hvf_ok(r);
2177     }
2178 }
2179 
2180 static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
2181 {
2182     return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
2183 }
2184 
2185 static void hvf_arch_set_traps(void)
2186 {
2187     CPUState *cpu;
2188     bool should_enable_traps = false;
2189     hv_return_t r = HV_SUCCESS;
2190 
2191     /* Check whether guest debugging is enabled for at least one vCPU;
2192      * if it is, enable exiting the guest on all vCPUs. */
2193     CPU_FOREACH(cpu) {
2194         should_enable_traps |= cpu->accel->guest_debug_enabled;
2195     }
2196     CPU_FOREACH(cpu) {
2197         /* Set whether debug exceptions exit the guest */
2198         r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
2199                                               should_enable_traps);
2200         assert_hvf_ok(r);
2201 
2202         /* Set whether accesses to debug registers exit the guest */
2203         r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
2204                                                 should_enable_traps);
2205         assert_hvf_ok(r);
2206     }
2207 }
2208 
2209 void hvf_arch_update_guest_debug(CPUState *cpu)
2210 {
2211     ARMCPU *arm_cpu = ARM_CPU(cpu);
2212     CPUARMState *env = &arm_cpu->env;
2213 
2214     /* Check whether guest debugging is enabled */
2215     cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
2216                                       hvf_sw_breakpoints_active(cpu) ||
2217                                       hvf_arm_hw_debug_active(cpu);
2218 
2219     /* Update debug registers */
2220     if (cpu->accel->guest_debug_enabled) {
2221         hvf_put_gdbstub_debug_registers(cpu);
2222     } else {
2223         hvf_put_guest_debug_registers(cpu);
2224     }
2225 
2226     cpu_synchronize_state(cpu);
2227 
2228     /* Enable/disable single-stepping */
2229     if (cpu->singlestep_enabled) {
2230         env->cp15.mdscr_el1 =
2231             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
2232         pstate_write(env, pstate_read(env) | PSTATE_SS);
2233     } else {
2234         env->cp15.mdscr_el1 =
2235             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
2236     }
2237 
2238     /* Enable/disable Breakpoint exceptions */
2239     if (hvf_arm_hw_debug_active(cpu)) {
2240         env->cp15.mdscr_el1 =
2241             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
2242     } else {
2243         env->cp15.mdscr_el1 =
2244             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
2245     }
2246 
2247     hvf_arch_set_traps();
2248 }
2249 
2250 bool hvf_arch_supports_guest_debug(void)
2251 {
2252     return true;
2253 }
2254