/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"
#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

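/*
 * ID_AA64DFR0_EL1.BRPs/WRPs encode the number of hardware breakpoint and
 * watchpoint registers minus one, hence the "+ 1" below.
 */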
static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

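/*
 * The field layout below matches the ESR_EL2 ISS encoding for a trapped
 * AArch64 MSR/MRS access: Op0 [21:20], Op2 [19:17], Op1 [16:14],
 * CRn [13:10], CRm [4:1]. Bit 0 carries the read/write direction, which is
 * why CRm starts at bit 1. For example, SYSREG(3, 3, 14, 0, 1) below is the
 * trap encoding of CNTPCT_EL0.
 */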
#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

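/* IPA size (in bits) chosen at VM creation time; see hvf_arch_vm_create(). */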
static uint32_t chosen_ipa_bit_size;

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

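/*
 * Mapping between an HVF system-register enum and QEMU's cp-reg key.
 * cp_idx is the register's index in the cpreg_indexes/cpreg_values lists;
 * it is assigned in hvf_arch_init_vcpu() and set to -1 for registers QEMU
 * does not know about.
 */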
struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

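/* Write back any dirty QEMU-side register state before touching the vCPU. */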
static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->accel->dirty) {
        hvf_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

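    /* rt == 31 encodes the zero register (XZR); writes to it are ignored. */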
    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * Setting SCTLR.SPAN = 1 disables the "set PSTATE.PAN on exception entry"
     * behaviour. To improve compatibility, set the bit at boot; guest
     * software can re-enable the behaviour by clearing it back to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
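    /* EL0 and EL1 fields must both read 1, i.e. AArch64-only at those ELs. */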
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try to map memory outside of the valid range.
     * This logic just clamps the passed in IPA bit size to the first valid
     * PARange value <= to it.
     */
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * report it in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
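    /* The guest counter runs at the host frequency; read CNTFRQ_EL0 directly. */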
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
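    /* Set ID_AA64PFR0_EL1.GIC (bits [27:24]) to 1 when a GICv3 is in use. */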
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

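/* Force the vCPU out of hv_vcpu_run() so the main loop can service the kick. */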
void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

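/* Inject an exception into the guest at EL1 with the given syndrome. */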
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

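/* op0 == 3, op1 == 0, CRn == 0, CRm == 1..7 is the AArch64 ID register space. */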
static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}

static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCR_EL0:
            *val = env->cp15.c9_pmcr;
            return 0;
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            *val = env->cp15.c15_ccnt;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            *val = env->cp15.c9_pmovsr;
            return 0;
        case SYSREG_PMSELR_EL0:
            *val = env->cp15.c9_pmselr;
            return 0;
        case SYSREG_PMINTENCLR_EL1:
            *val = env->cp15.c9_pminten;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            *val = env->cp15.pmccfiltr_el0;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMUSERENR_EL0:
            *val = env->cp15.c9_pmuserenr;
            return 0;
        case SYSREG_PMCEID0_EL0:
        case SYSREG_PMCEID1_EL0:
            /* We can't really count anything yet, declare all events invalid */
            *val = 0;
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
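        /* Derive the guest count from the QEMU virtual clock: ns elapsed / counter period. */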
1311         *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
1312               gt_cntfrq_period_ns(arm_cpu);
1313         return 0;
1314     case SYSREG_OSLSR_EL1:
1315         *val = env->cp15.oslsr_el1;
1316         return 0;
1317     case SYSREG_OSDLR_EL1:
1318         /* Dummy register */
1319         return 0;
1320     case SYSREG_ICC_AP0R0_EL1:
1321     case SYSREG_ICC_AP0R1_EL1:
1322     case SYSREG_ICC_AP0R2_EL1:
1323     case SYSREG_ICC_AP0R3_EL1:
1324     case SYSREG_ICC_AP1R0_EL1:
1325     case SYSREG_ICC_AP1R1_EL1:
1326     case SYSREG_ICC_AP1R2_EL1:
1327     case SYSREG_ICC_AP1R3_EL1:
1328     case SYSREG_ICC_ASGI1R_EL1:
1329     case SYSREG_ICC_BPR0_EL1:
1330     case SYSREG_ICC_BPR1_EL1:
1331     case SYSREG_ICC_DIR_EL1:
1332     case SYSREG_ICC_EOIR0_EL1:
1333     case SYSREG_ICC_EOIR1_EL1:
1334     case SYSREG_ICC_HPPIR0_EL1:
1335     case SYSREG_ICC_HPPIR1_EL1:
1336     case SYSREG_ICC_IAR0_EL1:
1337     case SYSREG_ICC_IAR1_EL1:
1338     case SYSREG_ICC_IGRPEN0_EL1:
1339     case SYSREG_ICC_IGRPEN1_EL1:
1340     case SYSREG_ICC_PMR_EL1:
1341     case SYSREG_ICC_SGI0R_EL1:
1342     case SYSREG_ICC_SGI1R_EL1:
1343     case SYSREG_ICC_SRE_EL1:
1344     case SYSREG_ICC_CTLR_EL1:
1345         /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
1346         if (hvf_sysreg_read_cp(cpu, reg, val)) {
1347             return 0;
1348         }
1349         break;
1350     case SYSREG_DBGBVR0_EL1:
1351     case SYSREG_DBGBVR1_EL1:
1352     case SYSREG_DBGBVR2_EL1:
1353     case SYSREG_DBGBVR3_EL1:
1354     case SYSREG_DBGBVR4_EL1:
1355     case SYSREG_DBGBVR5_EL1:
1356     case SYSREG_DBGBVR6_EL1:
1357     case SYSREG_DBGBVR7_EL1:
1358     case SYSREG_DBGBVR8_EL1:
1359     case SYSREG_DBGBVR9_EL1:
1360     case SYSREG_DBGBVR10_EL1:
1361     case SYSREG_DBGBVR11_EL1:
1362     case SYSREG_DBGBVR12_EL1:
1363     case SYSREG_DBGBVR13_EL1:
1364     case SYSREG_DBGBVR14_EL1:
1365     case SYSREG_DBGBVR15_EL1:
1366         *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
1367         return 0;
1368     case SYSREG_DBGBCR0_EL1:
1369     case SYSREG_DBGBCR1_EL1:
1370     case SYSREG_DBGBCR2_EL1:
1371     case SYSREG_DBGBCR3_EL1:
1372     case SYSREG_DBGBCR4_EL1:
1373     case SYSREG_DBGBCR5_EL1:
1374     case SYSREG_DBGBCR6_EL1:
1375     case SYSREG_DBGBCR7_EL1:
1376     case SYSREG_DBGBCR8_EL1:
1377     case SYSREG_DBGBCR9_EL1:
1378     case SYSREG_DBGBCR10_EL1:
1379     case SYSREG_DBGBCR11_EL1:
1380     case SYSREG_DBGBCR12_EL1:
1381     case SYSREG_DBGBCR13_EL1:
1382     case SYSREG_DBGBCR14_EL1:
1383     case SYSREG_DBGBCR15_EL1:
1384         *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
1385         return 0;
1386     case SYSREG_DBGWVR0_EL1:
1387     case SYSREG_DBGWVR1_EL1:
1388     case SYSREG_DBGWVR2_EL1:
1389     case SYSREG_DBGWVR3_EL1:
1390     case SYSREG_DBGWVR4_EL1:
1391     case SYSREG_DBGWVR5_EL1:
1392     case SYSREG_DBGWVR6_EL1:
1393     case SYSREG_DBGWVR7_EL1:
1394     case SYSREG_DBGWVR8_EL1:
1395     case SYSREG_DBGWVR9_EL1:
1396     case SYSREG_DBGWVR10_EL1:
1397     case SYSREG_DBGWVR11_EL1:
1398     case SYSREG_DBGWVR12_EL1:
1399     case SYSREG_DBGWVR13_EL1:
1400     case SYSREG_DBGWVR14_EL1:
1401     case SYSREG_DBGWVR15_EL1:
1402         *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
1403         return 0;
1404     case SYSREG_DBGWCR0_EL1:
1405     case SYSREG_DBGWCR1_EL1:
1406     case SYSREG_DBGWCR2_EL1:
1407     case SYSREG_DBGWCR3_EL1:
1408     case SYSREG_DBGWCR4_EL1:
1409     case SYSREG_DBGWCR5_EL1:
1410     case SYSREG_DBGWCR6_EL1:
1411     case SYSREG_DBGWCR7_EL1:
1412     case SYSREG_DBGWCR8_EL1:
1413     case SYSREG_DBGWCR9_EL1:
1414     case SYSREG_DBGWCR10_EL1:
1415     case SYSREG_DBGWCR11_EL1:
1416     case SYSREG_DBGWCR12_EL1:
1417     case SYSREG_DBGWCR13_EL1:
1418     case SYSREG_DBGWCR14_EL1:
1419     case SYSREG_DBGWCR15_EL1:
1420         *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
1421         return 0;
1422     default:
1423         if (is_id_sysreg(reg)) {
1424             /* ID system registers read as RES0 */
1425             *val = 0;
1426             return 0;
1427         }
1428     }
1429 
1430     cpu_synchronize_state(cpu);
1431     trace_hvf_unhandled_sysreg_read(env->pc, reg,
1432                                     SYSREG_OP0(reg),
1433                                     SYSREG_OP1(reg),
1434                                     SYSREG_CRN(reg),
1435                                     SYSREG_CRM(reg),
1436                                     SYSREG_OP2(reg));
1437     hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1438     return 1;
1439 }
1440 
1441 static void pmu_update_irq(CPUARMState *env)
1442 {
1443     ARMCPU *cpu = env_archcpu(env);
1444     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1445             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1446 }
1447 
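/*
 * No PMU events are implemented under hvf yet, so every event number is
 * reported as unsupported (matching the PMCEID0/PMCEID1 reads above, which
 * declare all events invalid).
 */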
1448 static bool pmu_event_supported(uint16_t number)
1449 {
1450     return false;
1451 }
1452 
1453 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1454  * the current EL, security state, and register configuration.
1455  */
1456 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1457 {
1458     uint64_t filter;
1459     bool enabled, filtered = true;
1460     int el = arm_current_el(env);
1461 
1462     enabled = (env->cp15.c9_pmcr & PMCRE) &&
1463               (env->cp15.c9_pmcnten & (1 << counter));
1464 
1465     if (counter == 31) {
1466         filter = env->cp15.pmccfiltr_el0;
1467     } else {
1468         filter = env->cp15.c14_pmevtyper[counter];
1469     }
1470 
1471     if (el == 0) {
1472         filtered = filter & PMXEVTYPER_U;
1473     } else if (el == 1) {
1474         filtered = filter & PMXEVTYPER_P;
1475     }
1476 
1477     if (counter != 31) {
1478         /*
1479          * If not checking PMCCNTR, ensure the counter is set up to an event
1480          * we support
1481          */
1482         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1483         if (!pmu_event_supported(event)) {
1484             return false;
1485         }
1486     }
1487 
1488     return enabled && !filtered;
1489 }
1490 
1491 static void pmswinc_write(CPUARMState *env, uint64_t value)
1492 {
1493     unsigned int i;
1494     for (i = 0; i < pmu_num_counters(env); i++) {
1495         /* Increment a counter's count iff: */
1496         if ((value & (1 << i)) && /* counter's bit is set */
1497                 /* counter is enabled and not filtered */
1498                 pmu_counter_enabled(env, i) &&
1499                 /* counter is SW_INCR */
1500                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1501             /*
1502              * Detect if this write causes an overflow since we can't predict
1503              * PMSWINC overflows like we can for other events
1504              */
1505             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1506 
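            /*
             * The 32-bit counter overflowed iff bit 31 is set in the old
             * value and clear in the incremented one.
             */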
1507             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1508                 env->cp15.c9_pmovsr |= (1 << i);
1509                 pmu_update_irq(env);
1510             }
1511 
1512             env->cp15.c14_pmevcntr[i] = new_pmswinc;
1513         }
1514     }
1515 }
1516 
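/*
 * Mirror of the read path: forward a sysreg write to the matching TCG cpreg
 * handler. Callers only use this for GICv3 registers.
 */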
1517 static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
1518 {
1519     ARMCPU *arm_cpu = ARM_CPU(cpu);
1520     CPUARMState *env = &arm_cpu->env;
1521     const ARMCPRegInfo *ri;
1522 
1523     ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
1524 
1525     if (ri) {
1526         if (ri->accessfn) {
1527             if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
1528                 return false;
1529             }
1530         }
1531         if (ri->writefn) {
1532             ri->writefn(env, ri, val);
1533         } else {
1534             CPREG_FIELD64(env, ri) = val;
1535         }
1536 
1537         trace_hvf_vgic_write(ri->name, val);
1538         return true;
1539     }
1540 
1541     return false;
1542 }
1543 
1544 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
1545 {
1546     ARMCPU *arm_cpu = ARM_CPU(cpu);
1547     CPUARMState *env = &arm_cpu->env;
1548 
1549     trace_hvf_sysreg_write(reg,
1550                            SYSREG_OP0(reg),
1551                            SYSREG_OP1(reg),
1552                            SYSREG_CRN(reg),
1553                            SYSREG_CRM(reg),
1554                            SYSREG_OP2(reg),
1555                            val);
1556 
1557     if (arm_feature(env, ARM_FEATURE_PMU)) {
1558         switch (reg) {
1559         case SYSREG_PMCCNTR_EL0:
1560             pmu_op_start(env);
1561             env->cp15.c15_ccnt = val;
1562             pmu_op_finish(env);
1563             return 0;
1564         case SYSREG_PMCR_EL0:
1565             pmu_op_start(env);
1566 
1567             if (val & PMCRC) {
1568                 /* The counter has been reset */
1569                 env->cp15.c15_ccnt = 0;
1570             }
1571 
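            /* PMCR.P resets all event counters; the cycle counter is PMCR.C */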
1572             if (val & PMCRP) {
1573                 unsigned int i;
1574                 for (i = 0; i < pmu_num_counters(env); i++) {
1575                     env->cp15.c14_pmevcntr[i] = 0;
1576                 }
1577             }
1578 
1579             env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1580             env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
1581 
1582             pmu_op_finish(env);
1583             return 0;
1584         case SYSREG_PMUSERENR_EL0:
1585             env->cp15.c9_pmuserenr = val & 0xf;
1586             return 0;
1587         case SYSREG_PMCNTENSET_EL0:
1588             env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
1589             return 0;
1590         case SYSREG_PMCNTENCLR_EL0:
1591             env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
1592             return 0;
1593         case SYSREG_PMINTENCLR_EL1:
1594             pmu_op_start(env);
1595             env->cp15.c9_pminten &= ~val; /* PMINTENCLR: writes of 1 clear enable bits */
1596             pmu_op_finish(env);
1597             return 0;
1598         case SYSREG_PMOVSCLR_EL0:
1599             pmu_op_start(env);
1600             env->cp15.c9_pmovsr &= ~val;
1601             pmu_op_finish(env);
1602             return 0;
1603         case SYSREG_PMSWINC_EL0:
1604             pmu_op_start(env);
1605             pmswinc_write(env, val);
1606             pmu_op_finish(env);
1607             return 0;
1608         case SYSREG_PMSELR_EL0:
1609             env->cp15.c9_pmselr = val & 0x1f;
1610             return 0;
1611         case SYSREG_PMCCFILTR_EL0:
1612             pmu_op_start(env);
1613             env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
1614             pmu_op_finish(env);
1615             return 0;
1616         }
1617     }
1618 
1619     switch (reg) {
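    /*
     * The OS lock is modelled in a simplified way here: the OSLK bit written
     * via OSLAR_EL1 is mirrored straight into OSLSR_EL1.
     */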
1620     case SYSREG_OSLAR_EL1:
1621         env->cp15.oslsr_el1 = val & 1;
1622         return 0;
1623     case SYSREG_OSDLR_EL1:
1624         /* Dummy register */
1625         return 0;
1626     case SYSREG_ICC_AP0R0_EL1:
1627     case SYSREG_ICC_AP0R1_EL1:
1628     case SYSREG_ICC_AP0R2_EL1:
1629     case SYSREG_ICC_AP0R3_EL1:
1630     case SYSREG_ICC_AP1R0_EL1:
1631     case SYSREG_ICC_AP1R1_EL1:
1632     case SYSREG_ICC_AP1R2_EL1:
1633     case SYSREG_ICC_AP1R3_EL1:
1634     case SYSREG_ICC_ASGI1R_EL1:
1635     case SYSREG_ICC_BPR0_EL1:
1636     case SYSREG_ICC_BPR1_EL1:
1637     case SYSREG_ICC_CTLR_EL1:
1638     case SYSREG_ICC_DIR_EL1:
1639     case SYSREG_ICC_EOIR0_EL1:
1640     case SYSREG_ICC_EOIR1_EL1:
1641     case SYSREG_ICC_HPPIR0_EL1:
1642     case SYSREG_ICC_HPPIR1_EL1:
1643     case SYSREG_ICC_IAR0_EL1:
1644     case SYSREG_ICC_IAR1_EL1:
1645     case SYSREG_ICC_IGRPEN0_EL1:
1646     case SYSREG_ICC_IGRPEN1_EL1:
1647     case SYSREG_ICC_PMR_EL1:
1648     case SYSREG_ICC_SGI0R_EL1:
1649     case SYSREG_ICC_SGI1R_EL1:
1650     case SYSREG_ICC_SRE_EL1:
1651         /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
1652         if (hvf_sysreg_write_cp(cpu, reg, val)) {
1653             return 0;
1654         }
1655         break;
1656     case SYSREG_MDSCR_EL1:
1657         env->cp15.mdscr_el1 = val;
1658         return 0;
1659     case SYSREG_DBGBVR0_EL1:
1660     case SYSREG_DBGBVR1_EL1:
1661     case SYSREG_DBGBVR2_EL1:
1662     case SYSREG_DBGBVR3_EL1:
1663     case SYSREG_DBGBVR4_EL1:
1664     case SYSREG_DBGBVR5_EL1:
1665     case SYSREG_DBGBVR6_EL1:
1666     case SYSREG_DBGBVR7_EL1:
1667     case SYSREG_DBGBVR8_EL1:
1668     case SYSREG_DBGBVR9_EL1:
1669     case SYSREG_DBGBVR10_EL1:
1670     case SYSREG_DBGBVR11_EL1:
1671     case SYSREG_DBGBVR12_EL1:
1672     case SYSREG_DBGBVR13_EL1:
1673     case SYSREG_DBGBVR14_EL1:
1674     case SYSREG_DBGBVR15_EL1:
1675         env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
1676         return 0;
1677     case SYSREG_DBGBCR0_EL1:
1678     case SYSREG_DBGBCR1_EL1:
1679     case SYSREG_DBGBCR2_EL1:
1680     case SYSREG_DBGBCR3_EL1:
1681     case SYSREG_DBGBCR4_EL1:
1682     case SYSREG_DBGBCR5_EL1:
1683     case SYSREG_DBGBCR6_EL1:
1684     case SYSREG_DBGBCR7_EL1:
1685     case SYSREG_DBGBCR8_EL1:
1686     case SYSREG_DBGBCR9_EL1:
1687     case SYSREG_DBGBCR10_EL1:
1688     case SYSREG_DBGBCR11_EL1:
1689     case SYSREG_DBGBCR12_EL1:
1690     case SYSREG_DBGBCR13_EL1:
1691     case SYSREG_DBGBCR14_EL1:
1692     case SYSREG_DBGBCR15_EL1:
1693         env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
1694         return 0;
1695     case SYSREG_DBGWVR0_EL1:
1696     case SYSREG_DBGWVR1_EL1:
1697     case SYSREG_DBGWVR2_EL1:
1698     case SYSREG_DBGWVR3_EL1:
1699     case SYSREG_DBGWVR4_EL1:
1700     case SYSREG_DBGWVR5_EL1:
1701     case SYSREG_DBGWVR6_EL1:
1702     case SYSREG_DBGWVR7_EL1:
1703     case SYSREG_DBGWVR8_EL1:
1704     case SYSREG_DBGWVR9_EL1:
1705     case SYSREG_DBGWVR10_EL1:
1706     case SYSREG_DBGWVR11_EL1:
1707     case SYSREG_DBGWVR12_EL1:
1708     case SYSREG_DBGWVR13_EL1:
1709     case SYSREG_DBGWVR14_EL1:
1710     case SYSREG_DBGWVR15_EL1:
1711         env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
1712         return 0;
1713     case SYSREG_DBGWCR0_EL1:
1714     case SYSREG_DBGWCR1_EL1:
1715     case SYSREG_DBGWCR2_EL1:
1716     case SYSREG_DBGWCR3_EL1:
1717     case SYSREG_DBGWCR4_EL1:
1718     case SYSREG_DBGWCR5_EL1:
1719     case SYSREG_DBGWCR6_EL1:
1720     case SYSREG_DBGWCR7_EL1:
1721     case SYSREG_DBGWCR8_EL1:
1722     case SYSREG_DBGWCR9_EL1:
1723     case SYSREG_DBGWCR10_EL1:
1724     case SYSREG_DBGWCR11_EL1:
1725     case SYSREG_DBGWCR12_EL1:
1726     case SYSREG_DBGWCR13_EL1:
1727     case SYSREG_DBGWCR14_EL1:
1728     case SYSREG_DBGWCR15_EL1:
1729         env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
1730         return 0;
1731     }
1732 
1733     cpu_synchronize_state(cpu);
1734     trace_hvf_unhandled_sysreg_write(env->pc, reg,
1735                                      SYSREG_OP0(reg),
1736                                      SYSREG_OP1(reg),
1737                                      SYSREG_CRN(reg),
1738                                      SYSREG_CRM(reg),
1739                                      SYSREG_OP2(reg));
1740     hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1741     return 1;
1742 }
1743 
1744 static int hvf_inject_interrupts(CPUState *cpu)
1745 {
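    /*
     * hvf clears pending interrupts on every vCPU exit, so the pending
     * state is re-asserted here before each hv_vcpu_run().
     */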
1746     if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
1747         trace_hvf_inject_fiq();
1748         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
1749                                       true);
1750     }
1751 
1752     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
1753         trace_hvf_inject_irq();
1754         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
1755                                       true);
1756     }
1757 
1758     return 0;
1759 }
1760 
1761 static uint64_t hvf_vtimer_val_raw(void)
1762 {
1763     /*
1764      * mach_absolute_time() returns the host timebase, which runs ahead of
1765      * the vtimer by the VM offset that we define; subtract it here.
1766      */
1767     return mach_absolute_time() - hvf_state->vtimer_offset;
1768 }
1769 
1770 static uint64_t hvf_vtimer_val(void)
1771 {
1772     if (!runstate_is_running()) {
1773         /* VM is paused, the vtimer value is in vtimer.vtimer_val */
1774         return vtimer.vtimer_val;
1775     }
1776 
1777     return hvf_vtimer_val_raw();
1778 }
1779 
1780 static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
1781 {
1782     /*
1783      * Use pselect to sleep so that other threads can IPI us while we're
1784      * sleeping.
1785      */
1786     qatomic_set_mb(&cpu->thread_kicked, false);
1787     bql_unlock();
1788     pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
1789     bql_lock();
1790 }
1791 
1792 static void hvf_wfi(CPUState *cpu)
1793 {
1794     ARMCPU *arm_cpu = ARM_CPU(cpu);
1795     struct timespec ts;
1796     hv_return_t r;
1797     uint64_t ctl;
1798     uint64_t cval;
1799     int64_t ticks_to_sleep;
1800     uint64_t seconds;
1801     uint64_t nanos;
1802     uint32_t cntfrq;
1803 
1804     if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
1805         /* Interrupt pending, no need to wait */
1806         return;
1807     }
1808 
1809     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1810     assert_hvf_ok(r);
1811 
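    /* CNTV_CTL_EL0: bit 0 is ENABLE, bit 1 is IMASK, bit 2 is ISTATUS */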
1812     if (!(ctl & 1) || (ctl & 2)) {
1813         /* Timer disabled or masked, just wait for an IPI. */
1814         hvf_wait_for_ipi(cpu, NULL);
1815         return;
1816     }
1817 
1818     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
1819     assert_hvf_ok(r);
1820 
1821     ticks_to_sleep = cval - hvf_vtimer_val();
1822     if (ticks_to_sleep < 0) {
1823         return;
1824     }
1825 
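    /*
     * Despite its name, cntfrq holds the counter period in nanoseconds
     * (gt_cntfrq_period_ns()), so ticks * period gives nanoseconds, split
     * below into a seconds/nanoseconds pair for the timespec.
     */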
1826     cntfrq = gt_cntfrq_period_ns(arm_cpu);
1827     seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
1828     ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
1829     nanos = ticks_to_sleep * cntfrq;
1830 
1831     /*
1832      * Don't sleep for less than the time a context switch would take,
1833      * so that we can satisfy fast timer requests on the same CPU.
1834      * Measurements on M1 show the sweet spot to be ~2ms.
1835      */
1836     if (!seconds && nanos < (2 * SCALE_MS)) {
1837         return;
1838     }
1839 
1840     ts = (struct timespec) { seconds, nanos };
1841     hvf_wait_for_ipi(cpu, &ts);
1842 }
1843 
1844 static void hvf_sync_vtimer(CPUState *cpu)
1845 {
1846     ARMCPU *arm_cpu = ARM_CPU(cpu);
1847     hv_return_t r;
1848     uint64_t ctl;
1849     bool irq_state;
1850 
1851     if (!cpu->accel->vtimer_masked) {
1852         /* We will get notified on vtimer changes by hvf, nothing to do */
1853         return;
1854     }
1855 
1856     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1857     assert_hvf_ok(r);
1858 
1859     irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
1860                 (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
1861     qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
1862 
1863     if (!irq_state) {
1864         /* Timer no longer asserting, we can unmask it */
1865         hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
1866         cpu->accel->vtimer_masked = false;
1867     }
1868 }
1869 
1870 int hvf_vcpu_exec(CPUState *cpu)
1871 {
1872     ARMCPU *arm_cpu = ARM_CPU(cpu);
1873     CPUARMState *env = &arm_cpu->env;
1874     int ret;
1875     hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
1876     hv_return_t r;
1877     bool advance_pc = false;
1878 
1879     if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
1880         hvf_inject_interrupts(cpu)) {
1881         return EXCP_INTERRUPT;
1882     }
1883 
1884     if (cpu->halted) {
1885         return EXCP_HLT;
1886     }
1887 
1888     flush_cpu_state(cpu);
1889 
1890     bql_unlock();
1891     assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
1892 
1893     /* handle VMEXIT */
1894     uint64_t exit_reason = hvf_exit->reason;
1895     uint64_t syndrome = hvf_exit->exception.syndrome;
1896     uint32_t ec = syn_get_ec(syndrome);
1897 
1898     ret = 0;
1899     bql_lock();
1900     switch (exit_reason) {
1901     case HV_EXIT_REASON_EXCEPTION:
1902         /* This is the main one, handle below. */
1903         break;
1904     case HV_EXIT_REASON_VTIMER_ACTIVATED:
1905         qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
1906         cpu->accel->vtimer_masked = true;
1907         return 0;
1908     case HV_EXIT_REASON_CANCELED:
1909         /* we got kicked, no exit to process */
1910         return 0;
1911     default:
1912         g_assert_not_reached();
1913     }
1914 
1915     hvf_sync_vtimer(cpu);
1916 
1917     switch (ec) {
1918     case EC_SOFTWARESTEP: {
1919         ret = EXCP_DEBUG;
1920 
1921         if (!cpu->singlestep_enabled) {
1922             error_report("EC_SOFTWARESTEP but single-stepping not enabled");
1923         }
1924         break;
1925     }
1926     case EC_AA64_BKPT: {
1927         ret = EXCP_DEBUG;
1928 
1929         cpu_synchronize_state(cpu);
1930 
1931         if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
1932             /* Re-inject into the guest */
1933             ret = 0;
1934             hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
1935         }
1936         break;
1937     }
1938     case EC_BREAKPOINT: {
1939         ret = EXCP_DEBUG;
1940 
1941         cpu_synchronize_state(cpu);
1942 
1943         if (!find_hw_breakpoint(cpu, env->pc)) {
1944             error_report("EC_BREAKPOINT but unknown hw breakpoint");
1945         }
1946         break;
1947     }
1948     case EC_WATCHPOINT: {
1949         ret = EXCP_DEBUG;
1950 
1951         cpu_synchronize_state(cpu);
1952 
1953         CPUWatchpoint *wp =
1954             find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
1955         if (!wp) {
1956             error_report("EC_WATCHPOINT but unknown hw watchpoint");
1957         }
1958         cpu->watchpoint_hit = wp;
1959         break;
1960     }
1961     case EC_DATAABORT: {
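        /*
         * Data abort ISS layout (Arm ARM): ISV is bit 24, SAS bits [23:22],
         * SRT bits [20:16], CM bit 8, S1PTW bit 7, WnR bit 6.
         */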
1962         bool isv = syndrome & ARM_EL_ISV;
1963         bool iswrite = (syndrome >> 6) & 1;
1964         bool s1ptw = (syndrome >> 7) & 1;
1965         uint32_t sas = (syndrome >> 22) & 3;
1966         uint32_t len = 1 << sas;
1967         uint32_t srt = (syndrome >> 16) & 0x1f;
1968         uint32_t cm = (syndrome >> 8) & 0x1;
1969         uint64_t val = 0;
1970 
1971         trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
1972                              hvf_exit->exception.physical_address, isv,
1973                              iswrite, s1ptw, len, srt);
1974 
1975         if (cm) {
1976             /* We don't cache MMIO regions */
1977             advance_pc = true;
1978             break;
1979         }
1980 
1981         assert(isv);
1982 
1983         if (iswrite) {
1984             val = hvf_get_reg(cpu, srt);
1985             address_space_write(&address_space_memory,
1986                                 hvf_exit->exception.physical_address,
1987                                 MEMTXATTRS_UNSPECIFIED, &val, len);
1988         } else {
1989             address_space_read(&address_space_memory,
1990                                hvf_exit->exception.physical_address,
1991                                MEMTXATTRS_UNSPECIFIED, &val, len);
1992             hvf_set_reg(cpu, srt, val);
1993         }
1994 
1995         advance_pc = true;
1996         break;
1997     }
1998     case EC_SYSTEMREGISTERTRAP: {
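        /*
         * Trapped MSR/MRS ISS: bit 0 is the direction (1 = read, i.e. MRS),
         * Rt is in bits [9:5], the rest encodes the system register.
         */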
1999         bool isread = (syndrome >> 0) & 1;
2000         uint32_t rt = (syndrome >> 5) & 0x1f;
2001         uint32_t reg = syndrome & SYSREG_MASK;
2002         uint64_t val;
2003         int sysreg_ret = 0;
2004 
2005         if (isread) {
2006             sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
2007             if (!sysreg_ret) {
2008                 trace_hvf_sysreg_read(reg,
2009                                       SYSREG_OP0(reg),
2010                                       SYSREG_OP1(reg),
2011                                       SYSREG_CRN(reg),
2012                                       SYSREG_CRM(reg),
2013                                       SYSREG_OP2(reg),
2014                                       val);
2015                 hvf_set_reg(cpu, rt, val);
2016             }
2017         } else {
2018             val = hvf_get_reg(cpu, rt);
2019             sysreg_ret = hvf_sysreg_write(cpu, reg, val);
2020         }
2021 
2022         advance_pc = !sysreg_ret;
2023         break;
2024     }
2025     case EC_WFX_TRAP:
2026         advance_pc = true;
2027         if (!(syndrome & WFX_IS_WFE)) {
2028             hvf_wfi(cpu);
2029         }
2030         break;
2031     case EC_AA64_HVC:
2032         cpu_synchronize_state(cpu);
2033         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
2034             if (!hvf_handle_psci_call(cpu)) {
2035                 trace_hvf_unknown_hvc(env->xregs[0]);
2036                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
2037                 env->xregs[0] = -1;
2038             }
2039         } else {
2040             trace_hvf_unknown_hvc(env->xregs[0]);
2041             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
2042         }
2043         break;
2044     case EC_AA64_SMC:
2045         cpu_synchronize_state(cpu);
2046         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
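            /*
             * Unlike HVC, a trapped SMC leaves the PC pointing at the SMC
             * instruction itself, so it has to be advanced manually.
             */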
2047             advance_pc = true;
2048 
2049             if (!hvf_handle_psci_call(cpu)) {
2050                 trace_hvf_unknown_smc(env->xregs[0]);
2051                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
2052                 env->xregs[0] = -1;
2053             }
2054         } else {
2055             trace_hvf_unknown_smc(env->xregs[0]);
2056             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
2057         }
2058         break;
2059     default:
2060         cpu_synchronize_state(cpu);
2061         trace_hvf_exit(syndrome, ec, env->pc);
2062         error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
2063     }
2064 
2065     if (advance_pc) {
2066         uint64_t pc;
2067 
2068         flush_cpu_state(cpu);
2069 
2070         r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
2071         assert_hvf_ok(r);
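        /* All AArch64 instructions are 4 bytes; step over the trapping one */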
2072         pc += 4;
2073         r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
2074         assert_hvf_ok(r);
2075 
2076         /* Handle single-stepping over instructions which trigger a VM exit */
2077         if (cpu->singlestep_enabled) {
2078             ret = EXCP_DEBUG;
2079         }
2080     }
2081 
2082     return ret;
2083 }
2084 
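/*
 * Only the paused vtimer snapshot is migrated; the offset is rebuilt on
 * resume in hvf_vm_state_change().
 */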
2085 static const VMStateDescription vmstate_hvf_vtimer = {
2086     .name = "hvf-vtimer",
2087     .version_id = 1,
2088     .minimum_version_id = 1,
2089     .fields = (const VMStateField[]) {
2090         VMSTATE_UINT64(vtimer_val, HVFVTimer),
2091         VMSTATE_END_OF_LIST()
2092     },
2093 };
2094 
2095 static void hvf_vm_state_change(void *opaque, bool running, RunState state)
2096 {
2097     HVFVTimer *s = opaque;
2098 
2099     if (running) {
2100         /* Update vtimer offset on all CPUs */
2101         hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
2102         cpu_synchronize_all_states();
2103     } else {
2104         /* Remember vtimer value on every pause */
2105         s->vtimer_val = hvf_vtimer_val_raw();
2106     }
2107 }
2108 
2109 int hvf_arch_init(void)
2110 {
2111     hvf_state->vtimer_offset = mach_absolute_time();
2112     vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
2113     qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
2114 
2115     hvf_arm_init_debug();
2116 
2117     return 0;
2118 }
2119 
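/* AArch64 "BRK #0" opcode, patched into guest memory as the SW breakpoint */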
2120 static const uint32_t brk_insn = 0xd4200000;
2121 
2122 int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2123 {
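    /* Save the original instruction, then patch in the BRK opcode */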
2124     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
2125         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
2126         return -EINVAL;
2127     }
2128     return 0;
2129 }
2130 
2131 int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2132 {
2133     static uint32_t brk;
2134 
2135     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
2136         brk != brk_insn ||
2137         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2138         return -EINVAL;
2139     }
2140     return 0;
2141 }
2142 
2143 int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
2144 {
2145     switch (type) {
2146     case GDB_BREAKPOINT_HW:
2147         return insert_hw_breakpoint(addr);
2148     case GDB_WATCHPOINT_READ:
2149     case GDB_WATCHPOINT_WRITE:
2150     case GDB_WATCHPOINT_ACCESS:
2151         return insert_hw_watchpoint(addr, len, type);
2152     default:
2153         return -ENOSYS;
2154     }
2155 }
2156 
2157 int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
2158 {
2159     switch (type) {
2160     case GDB_BREAKPOINT_HW:
2161         return delete_hw_breakpoint(addr);
2162     case GDB_WATCHPOINT_READ:
2163     case GDB_WATCHPOINT_WRITE:
2164     case GDB_WATCHPOINT_ACCESS:
2165         return delete_hw_watchpoint(addr, len, type);
2166     default:
2167         return -ENOSYS;
2168     }
2169 }
2170 
2171 void hvf_arch_remove_all_hw_breakpoints(void)
2172 {
2173     if (cur_hw_wps > 0) {
2174         g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
2175     }
2176     if (cur_hw_bps > 0) {
2177         g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
2178     }
2179 }
2180 
2181 /*
2182  * Update the vCPU with the gdbstub's view of debug registers. This view
2183  * consists of all hardware breakpoints and watchpoints inserted so far while
2184  * debugging the guest.
2185  */
2186 static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
2187 {
2188     hv_return_t r = HV_SUCCESS;
2189     int i;
2190 
2191     for (i = 0; i < cur_hw_bps; i++) {
2192         HWBreakpoint *bp = get_hw_bp(i);
2193         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
2194         assert_hvf_ok(r);
2195         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
2196         assert_hvf_ok(r);
2197     }
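    /* Zero the remaining breakpoint registers so stale entries cannot fire */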
2198     for (i = cur_hw_bps; i < max_hw_bps; i++) {
2199         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
2200         assert_hvf_ok(r);
2201         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
2202         assert_hvf_ok(r);
2203     }
2204 
2205     for (i = 0; i < cur_hw_wps; i++) {
2206         HWWatchpoint *wp = get_hw_wp(i);
2207         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
2208         assert_hvf_ok(r);
2209         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
2210         assert_hvf_ok(r);
2211     }
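    /* Likewise zero the watchpoint registers beyond those in use */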
2212     for (i = cur_hw_wps; i < max_hw_wps; i++) {
2213         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
2214         assert_hvf_ok(r);
2215         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
2216         assert_hvf_ok(r);
2217     }
2218 }
2219 
2220 /*
2221  * Update the vCPU with the guest's view of debug registers. This view is kept
2222  * in the environment at all times.
2223  */
2224 static void hvf_put_guest_debug_registers(CPUState *cpu)
2225 {
2226     ARMCPU *arm_cpu = ARM_CPU(cpu);
2227     CPUARMState *env = &arm_cpu->env;
2228     hv_return_t r = HV_SUCCESS;
2229     int i;
2230 
2231     for (i = 0; i < max_hw_bps; i++) {
2232         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
2233                                 env->cp15.dbgbcr[i]);
2234         assert_hvf_ok(r);
2235         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
2236                                 env->cp15.dbgbvr[i]);
2237         assert_hvf_ok(r);
2238     }
2239 
2240     for (i = 0; i < max_hw_wps; i++) {
2241         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
2242                                 env->cp15.dbgwcr[i]);
2243         assert_hvf_ok(r);
2244         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
2245                                 env->cp15.dbgwvr[i]);
2246         assert_hvf_ok(r);
2247     }
2248 }
2249 
2250 static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
2251 {
2252     return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
2253 }
2254 
2255 static void hvf_arch_set_traps(void)
2256 {
2257     CPUState *cpu;
2258     bool should_enable_traps = false;
2259     hv_return_t r = HV_SUCCESS;
2260 
2261     /* Check whether guest debugging is enabled for at least one vCPU; if it
2262      * is, enable exiting the guest on all vCPUs */
2263     CPU_FOREACH(cpu) {
2264         should_enable_traps |= cpu->accel->guest_debug_enabled;
2265     }
2266     CPU_FOREACH(cpu) {
2267         /* Set whether debug exceptions exit the guest */
2268         r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
2269                                               should_enable_traps);
2270         assert_hvf_ok(r);
2271 
2272         /* Set whether accesses to debug registers exit the guest */
2273         r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
2274                                                 should_enable_traps);
2275         assert_hvf_ok(r);
2276     }
2277 }
2278 
2279 void hvf_arch_update_guest_debug(CPUState *cpu)
2280 {
2281     ARMCPU *arm_cpu = ARM_CPU(cpu);
2282     CPUARMState *env = &arm_cpu->env;
2283 
2284     /* Check whether guest debugging is enabled */
2285     cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
2286                                     hvf_sw_breakpoints_active(cpu) ||
2287                                     hvf_arm_hw_debug_active(cpu);
2288 
2289     /* Update debug registers */
2290     if (cpu->accel->guest_debug_enabled) {
2291         hvf_put_gdbstub_debug_registers(cpu);
2292     } else {
2293         hvf_put_guest_debug_registers(cpu);
2294     }
2295 
2296     cpu_synchronize_state(cpu);
2297 
2298     /* Enable/disable single-stepping */
2299     if (cpu->singlestep_enabled) {
2300         env->cp15.mdscr_el1 =
2301             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
2302         pstate_write(env, pstate_read(env) | PSTATE_SS);
2303     } else {
2304         env->cp15.mdscr_el1 =
2305             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
2306     }
2307 
2308     /* Enable/disable Breakpoint exceptions */
2309     if (hvf_arm_hw_debug_active(cpu)) {
2310         env->cp15.mdscr_el1 =
2311             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
2312     } else {
2313         env->cp15.mdscr_el1 =
2314             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
2315     }
2316 
2317     hvf_arch_set_traps();
2318 }
2319 
2320 bool hvf_arch_supports_guest_debug(void)
2321 {
2322     return true;
2323 }
2324