1 /*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/units.h"
11 #include "target/arm/idau.h"
12 #include "trace.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "exec/gdbstub.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/host-utils.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/bitops.h"
20 #include "qemu/crc32c.h"
21 #include "qemu/qemu-print.h"
22 #include "exec/exec-all.h"
23 #include <zlib.h> /* For crc32 */
24 #include "hw/irq.h"
25 #include "hw/semihosting/semihost.h"
26 #include "sysemu/cpus.h"
27 #include "sysemu/kvm.h"
28 #include "qemu/range.h"
29 #include "qapi/qapi-commands-machine-target.h"
30 #include "qapi/error.h"
31 #include "qemu/guest-random.h"
32 #ifdef CONFIG_TCG
33 #include "arm_ldst.h"
34 #include "exec/cpu_ldst.h"
35 #endif
36
37 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
38
39 #ifndef CONFIG_USER_ONLY
40
41 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
42 MMUAccessType access_type, ARMMMUIdx mmu_idx,
43 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
44 target_ulong *page_size_ptr,
45 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
46 #endif
47
48 static void switch_mode(CPUARMState *env, int mode);
49
50 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
51 {
52 int nregs;
53
54 /* VFP data registers are always little-endian. */
55 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
56 if (reg < nregs) {
57 stq_le_p(buf, *aa32_vfp_dreg(env, reg));
58 return 8;
59 }
60 if (arm_feature(env, ARM_FEATURE_NEON)) {
61 /* Aliases for Q regs. */
62 nregs += 16;
63 if (reg < nregs) {
64 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
65 stq_le_p(buf, q[0]);
66 stq_le_p(buf + 8, q[1]);
67 return 16;
68 }
69 }
70 switch (reg - nregs) {
71 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
72 case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
73 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
74 }
75 return 0;
76 }
77
78 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
79 {
80 int nregs;
81
82 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
83 if (reg < nregs) {
84 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
85 return 8;
86 }
87 if (arm_feature(env, ARM_FEATURE_NEON)) {
88 nregs += 16;
89 if (reg < nregs) {
90 uint64_t *q = aa32_vfp_qreg(env, reg - 32);
91 q[0] = ldq_le_p(buf);
92 q[1] = ldq_le_p(buf + 8);
93 return 16;
94 }
95 }
96 switch (reg - nregs) {
97 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
98 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
99 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
100 }
101 return 0;
102 }
103
104 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
105 {
106 switch (reg) {
107 case 0 ... 31:
108 /* 128 bit FP register */
109 {
110 uint64_t *q = aa64_vfp_qreg(env, reg);
111 stq_le_p(buf, q[0]);
112 stq_le_p(buf + 8, q[1]);
113 return 16;
114 }
115 case 32:
116 /* FPSR */
117 stl_p(buf, vfp_get_fpsr(env));
118 return 4;
119 case 33:
120 /* FPCR */
121 stl_p(buf, vfp_get_fpcr(env));
122 return 4;
123 default:
124 return 0;
125 }
126 }
127
128 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
129 {
130 switch (reg) {
131 case 0 ... 31:
132 /* 128 bit FP register */
133 {
134 uint64_t *q = aa64_vfp_qreg(env, reg);
135 q[0] = ldq_le_p(buf);
136 q[1] = ldq_le_p(buf + 8);
137 return 16;
138 }
139 case 32:
140 /* FPSR */
141 vfp_set_fpsr(env, ldl_p(buf));
142 return 4;
143 case 33:
144 /* FPCR */
145 vfp_set_fpcr(env, ldl_p(buf));
146 return 4;
147 default:
148 return 0;
149 }
150 }
151
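/* Raw accessors for a register's backing field in CPUARMState; the field is
 * read/written as 32 or 64 bits depending on the regdef.
 */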
152 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
153 {
154 assert(ri->fieldoffset);
155 if (cpreg_field_is_64bit(ri)) {
156 return CPREG_FIELD64(env, ri);
157 } else {
158 return CPREG_FIELD32(env, ri);
159 }
160 }
161
162 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
163 uint64_t value)
164 {
165 assert(ri->fieldoffset);
166 if (cpreg_field_is_64bit(ri)) {
167 CPREG_FIELD64(env, ri) = value;
168 } else {
169 CPREG_FIELD32(env, ri) = value;
170 }
171 }
172
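/* Return a pointer to the register's backing storage within CPUARMState. */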
173 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
174 {
175 return (char *)env + ri->fieldoffset;
176 }
177
178 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
179 {
180 /* Raw read of a coprocessor register (as needed for migration, etc). */
181 if (ri->type & ARM_CP_CONST) {
182 return ri->resetvalue;
183 } else if (ri->raw_readfn) {
184 return ri->raw_readfn(env, ri);
185 } else if (ri->readfn) {
186 return ri->readfn(env, ri);
187 } else {
188 return raw_read(env, ri);
189 }
190 }
191
192 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
193 uint64_t v)
194 {
195 /* Raw write of a coprocessor register (as needed for migration, etc).
196 * Note that constant registers are treated as write-ignored; the
197 * caller should check for success by whether a readback gives the
198 * value written.
199 */
200 if (ri->type & ARM_CP_CONST) {
201 return;
202 } else if (ri->raw_writefn) {
203 ri->raw_writefn(env, ri, v);
204 } else if (ri->writefn) {
205 ri->writefn(env, ri, v);
206 } else {
207 raw_write(env, ri, v);
208 }
209 }
210
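/* GDB read of a system register: look the cpreg up via the key recorded in
 * the dynamically generated XML description, then do a raw read of it.
 */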
211 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
212 {
213 ARMCPU *cpu = env_archcpu(env);
214 const ARMCPRegInfo *ri;
215 uint32_t key;
216
217 key = cpu->dyn_xml.cpregs_keys[reg];
218 ri = get_arm_cp_reginfo(cpu->cp_regs, key);
219 if (ri) {
220 if (cpreg_field_is_64bit(ri)) {
221 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
222 } else {
223 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
224 }
225 }
226 return 0;
227 }
228
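/* GDB writes to system registers are not currently supported; the write is
 * ignored and 0 is returned.
 */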
229 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
230 {
231 return 0;
232 }
233
234 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
235 {
236 /* Return true if the regdef would cause an assertion if you called
237 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
238 * program bug for it not to have the NO_RAW flag).
239 * NB that returning false here doesn't necessarily mean that calling
240 * read/write_raw_cp_reg() is safe, because we can't distinguish "has
241 * read/write access functions which are safe for raw use" from "has
242 * read/write access functions which have side effects but has forgotten
243 * to provide raw access functions".
244 * The tests here line up with the conditions in read/write_raw_cp_reg()
245 * and assertions in raw_read()/raw_write().
246 */
247 if ((ri->type & ARM_CP_CONST) ||
248 ri->fieldoffset ||
249 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
250 return false;
251 }
252 return true;
253 }
254
255 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
256 {
257 /* Write the coprocessor state from cpu->env to the (index,value) list. */
258 int i;
259 bool ok = true;
260
261 for (i = 0; i < cpu->cpreg_array_len; i++) {
262 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
263 const ARMCPRegInfo *ri;
264 uint64_t newval;
265
266 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
267 if (!ri) {
268 ok = false;
269 continue;
270 }
271 if (ri->type & ARM_CP_NO_RAW) {
272 continue;
273 }
274
275 newval = read_raw_cp_reg(&cpu->env, ri);
276 if (kvm_sync) {
277 /*
278 * Only sync if the previous list->cpustate sync succeeded.
279 * Rather than tracking the success/failure state for every
280 * item in the list, we just recheck "does the raw write we must
281 * have made in write_list_to_cpustate() read back OK" here.
282 */
283 uint64_t oldval = cpu->cpreg_values[i];
284
285 if (oldval == newval) {
286 continue;
287 }
288
289 write_raw_cp_reg(&cpu->env, ri, oldval);
290 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
291 continue;
292 }
293
294 write_raw_cp_reg(&cpu->env, ri, newval);
295 }
296 cpu->cpreg_values[i] = newval;
297 }
298 return ok;
299 }
300
301 bool write_list_to_cpustate(ARMCPU *cpu)
302 {
303 int i;
304 bool ok = true;
305
306 for (i = 0; i < cpu->cpreg_array_len; i++) {
307 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
308 uint64_t v = cpu->cpreg_values[i];
309 const ARMCPRegInfo *ri;
310
311 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
312 if (!ri) {
313 ok = false;
314 continue;
315 }
316 if (ri->type & ARM_CP_NO_RAW) {
317 continue;
318 }
319 /* Write value and confirm it reads back as written
320 * (to catch read-only registers and partially read-only
321 * registers where the incoming migration value doesn't match)
322 */
323 write_raw_cp_reg(&cpu->env, ri, v);
324 if (read_raw_cp_reg(&cpu->env, ri) != v) {
325 ok = false;
326 }
327 }
328 return ok;
329 }
330
331 static void add_cpreg_to_list(gpointer key, gpointer opaque)
332 {
333 ARMCPU *cpu = opaque;
334 uint64_t regidx;
335 const ARMCPRegInfo *ri;
336
337 regidx = *(uint32_t *)key;
338 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
339
340 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
341 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
342 /* The value array need not be initialized at this point */
343 cpu->cpreg_array_len++;
344 }
345 }
346
347 static void count_cpreg(gpointer key, gpointer opaque)
348 {
349 ARMCPU *cpu = opaque;
350 uint64_t regidx;
351 const ARMCPRegInfo *ri;
352
353 regidx = *(uint32_t *)key;
354 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
355
356 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
357 cpu->cpreg_array_len++;
358 }
359 }
360
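/* g_list_sort() comparator: order cpreg keys by their 64-bit KVM register ID. */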
361 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
362 {
363 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
364 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
365
366 if (aidx > bidx) {
367 return 1;
368 }
369 if (aidx < bidx) {
370 return -1;
371 }
372 return 0;
373 }
374
375 void init_cpreg_list(ARMCPU *cpu)
376 {
377 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
378 * Note that we require cpreg_tuples[] to be sorted by key ID.
379 */
380 GList *keys;
381 int arraylen;
382
383 keys = g_hash_table_get_keys(cpu->cp_regs);
384 keys = g_list_sort(keys, cpreg_key_compare);
385
386 cpu->cpreg_array_len = 0;
387
388 g_list_foreach(keys, count_cpreg, cpu);
389
390 arraylen = cpu->cpreg_array_len;
391 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
392 cpu->cpreg_values = g_new(uint64_t, arraylen);
393 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
394 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
395 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
396 cpu->cpreg_array_len = 0;
397
398 g_list_foreach(keys, add_cpreg_to_list, cpu);
399
400 assert(cpu->cpreg_array_len == arraylen);
401
402 g_list_free(keys);
403 }
404
405 /*
406 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
407 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
408 *
409 * access_el3_aa32ns: Used to check AArch32 register views.
410 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
411 */
412 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
413 const ARMCPRegInfo *ri,
414 bool isread)
415 {
416 bool secure = arm_is_secure_below_el3(env);
417
418 assert(!arm_el_is_aa64(env, 3));
419 if (secure) {
420 return CP_ACCESS_TRAP_UNCATEGORIZED;
421 }
422 return CP_ACCESS_OK;
423 }
424
425 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
426 const ARMCPRegInfo *ri,
427 bool isread)
428 {
429 if (!arm_el_is_aa64(env, 3)) {
430 return access_el3_aa32ns(env, ri, isread);
431 }
432 return CP_ACCESS_OK;
433 }
434
435 /* Some secure-only AArch32 registers trap to EL3 if used from
436 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
437 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
438 * We assume that the .access field is set to PL1_RW.
439 */
440 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
441 const ARMCPRegInfo *ri,
442 bool isread)
443 {
444 if (arm_current_el(env) == 3) {
445 return CP_ACCESS_OK;
446 }
447 if (arm_is_secure_below_el3(env)) {
448 return CP_ACCESS_TRAP_EL3;
449 }
450 /* This will be EL1 NS and EL2 NS, which just UNDEF */
451 return CP_ACCESS_TRAP_UNCATEGORIZED;
452 }
453
454 /* Check for traps to "powerdown debug" registers, which are controlled
455 * by MDCR.TDOSA
456 */
457 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
458 bool isread)
459 {
460 int el = arm_current_el(env);
461 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
462 (env->cp15.mdcr_el2 & MDCR_TDE) ||
463 (arm_hcr_el2_eff(env) & HCR_TGE);
464
465 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
466 return CP_ACCESS_TRAP_EL2;
467 }
468 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
469 return CP_ACCESS_TRAP_EL3;
470 }
471 return CP_ACCESS_OK;
472 }
473
474 /* Check for traps to "debug ROM" registers, which are controlled
475 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
476 */
477 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
478 bool isread)
479 {
480 int el = arm_current_el(env);
481 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
482 (env->cp15.mdcr_el2 & MDCR_TDE) ||
483 (arm_hcr_el2_eff(env) & HCR_TGE);
484
485 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
486 return CP_ACCESS_TRAP_EL2;
487 }
488 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
489 return CP_ACCESS_TRAP_EL3;
490 }
491 return CP_ACCESS_OK;
492 }
493
494 /* Check for traps to general debug registers, which are controlled
495 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
496 */
497 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
498 bool isread)
499 {
500 int el = arm_current_el(env);
501 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
502 (env->cp15.mdcr_el2 & MDCR_TDE) ||
503 (arm_hcr_el2_eff(env) & HCR_TGE);
504
505 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
506 return CP_ACCESS_TRAP_EL2;
507 }
508 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
509 return CP_ACCESS_TRAP_EL3;
510 }
511 return CP_ACCESS_OK;
512 }
513
514 /* Check for traps to performance monitor registers, which are controlled
515 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
516 */
517 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
518 bool isread)
519 {
520 int el = arm_current_el(env);
521
522 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
523 && !arm_is_secure_below_el3(env)) {
524 return CP_ACCESS_TRAP_EL2;
525 }
526 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
527 return CP_ACCESS_TRAP_EL3;
528 }
529 return CP_ACCESS_OK;
530 }
531
532 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
533 {
534 ARMCPU *cpu = env_archcpu(env);
535
536 raw_write(env, ri, value);
537 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
538 }
539
540 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
541 {
542 ARMCPU *cpu = env_archcpu(env);
543
544 if (raw_read(env, ri) != value) {
545 /* Unlike real hardware the qemu TLB uses virtual addresses,
546 * not modified virtual addresses, so this causes a TLB flush.
547 */
548 tlb_flush(CPU(cpu));
549 raw_write(env, ri, value);
550 }
551 }
552
553 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
554 uint64_t value)
555 {
556 ARMCPU *cpu = env_archcpu(env);
557
558 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
559 && !extended_addresses_enabled(env)) {
560 /* For VMSA (when not using the LPAE long descriptor page table
561 * format) this register includes the ASID, so do a TLB flush.
562 * For PMSA it is purely a process ID and no action is needed.
563 */
564 tlb_flush(CPU(cpu));
565 }
566 raw_write(env, ri, value);
567 }
568
569 /* IS variants of TLB operations must affect all cores */
570 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
571 uint64_t value)
572 {
573 CPUState *cs = env_cpu(env);
574
575 tlb_flush_all_cpus_synced(cs);
576 }
577
578 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
579 uint64_t value)
580 {
581 CPUState *cs = env_cpu(env);
582
583 tlb_flush_all_cpus_synced(cs);
584 }
585
586 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
587 uint64_t value)
588 {
589 CPUState *cs = env_cpu(env);
590
591 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
592 }
593
594 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
595 uint64_t value)
596 {
597 CPUState *cs = env_cpu(env);
598
599 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
600 }
601
602 /*
603 * Non-IS variants of TLB operations are upgraded to
604 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
605 * force broadcast of these operations.
606 */
607 static bool tlb_force_broadcast(CPUARMState *env)
608 {
609 return (env->cp15.hcr_el2 & HCR_FB) &&
610 arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
611 }
612
613 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
614 uint64_t value)
615 {
616 /* Invalidate all (TLBIALL) */
617 ARMCPU *cpu = env_archcpu(env);
618
619 if (tlb_force_broadcast(env)) {
620 tlbiall_is_write(env, NULL, value);
621 return;
622 }
623
624 tlb_flush(CPU(cpu));
625 }
626
627 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
628 uint64_t value)
629 {
630 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
631 ARMCPU *cpu = env_archcpu(env);
632
633 if (tlb_force_broadcast(env)) {
634 tlbimva_is_write(env, NULL, value);
635 return;
636 }
637
638 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
639 }
640
641 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
642 uint64_t value)
643 {
644 /* Invalidate by ASID (TLBIASID) */
645 ARMCPU *cpu = env_archcpu(env);
646
647 if (tlb_force_broadcast(env)) {
648 tlbiasid_is_write(env, NULL, value);
649 return;
650 }
651
652 tlb_flush(CPU(cpu));
653 }
654
655 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
656 uint64_t value)
657 {
658 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
659 ARMCPU *cpu = env_archcpu(env);
660
661 if (tlb_force_broadcast(env)) {
662 tlbimvaa_is_write(env, NULL, value);
663 return;
664 }
665
666 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
667 }
668
669 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
670 uint64_t value)
671 {
672 CPUState *cs = env_cpu(env);
673
674 tlb_flush_by_mmuidx(cs,
675 ARMMMUIdxBit_S12NSE1 |
676 ARMMMUIdxBit_S12NSE0 |
677 ARMMMUIdxBit_S2NS);
678 }
679
680 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
681 uint64_t value)
682 {
683 CPUState *cs = env_cpu(env);
684
685 tlb_flush_by_mmuidx_all_cpus_synced(cs,
686 ARMMMUIdxBit_S12NSE1 |
687 ARMMMUIdxBit_S12NSE0 |
688 ARMMMUIdxBit_S2NS);
689 }
690
691 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
692 uint64_t value)
693 {
694 /* Invalidate by IPA. This has to invalidate any structures that
695 * contain only stage 2 translation information, but does not need
696 * to apply to structures that contain combined stage 1 and stage 2
697 * translation information.
698 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
699 */
700 CPUState *cs = env_cpu(env);
701 uint64_t pageaddr;
702
703 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
704 return;
705 }
706
707 pageaddr = sextract64(value << 12, 0, 40);
708
709 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
710 }
711
712 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
713 uint64_t value)
714 {
715 CPUState *cs = env_cpu(env);
716 uint64_t pageaddr;
717
718 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
719 return;
720 }
721
722 pageaddr = sextract64(value << 12, 0, 40);
723
724 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
725 ARMMMUIdxBit_S2NS);
726 }
727
728 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
729 uint64_t value)
730 {
731 CPUState *cs = env_cpu(env);
732
733 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
734 }
735
736 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
737 uint64_t value)
738 {
739 CPUState *cs = env_cpu(env);
740
741 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
742 }
743
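/* Invalidate the EL2 (Hyp) TLB entry for the MVA written; bits [11:0] of the
 * value are ignored.
 */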
744 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
745 uint64_t value)
746 {
747 CPUState *cs = env_cpu(env);
748 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
749
750 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
751 }
752
753 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
754 uint64_t value)
755 {
756 CPUState *cs = env_cpu(env);
757 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
758
759 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
760 ARMMMUIdxBit_S1E2);
761 }
762
763 static const ARMCPRegInfo cp_reginfo[] = {
764 /* Define the secure and non-secure FCSE identifier CP registers
765 * separately because there is no secure bank in V8 (no _EL3). This allows
766 * the secure register to be properly reset and migrated. There is also no
767 * v8 EL1 version of the register so the non-secure instance stands alone.
768 */
769 { .name = "FCSEIDR",
770 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
771 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
772 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
773 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
774 { .name = "FCSEIDR_S",
775 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
776 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
777 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
778 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
779 /* Define the secure and non-secure context identifier CP registers
780 * separately because there is no secure bank in V8 (no _EL3). This allows
781 * the secure register to be properly reset and migrated. In the
782 * non-secure case, the 32-bit register will have reset and migration
783 * disabled during registration as it is handled by the 64-bit instance.
784 */
785 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
786 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
787 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
788 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
789 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
790 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
791 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
792 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
793 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
794 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
795 REGINFO_SENTINEL
796 };
797
798 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
799 /* NB: Some of these registers exist in v8 but with more precise
800 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
801 */
802 /* MMU Domain access control / MPU write buffer control */
803 { .name = "DACR",
804 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
805 .access = PL1_RW, .resetvalue = 0,
806 .writefn = dacr_write, .raw_writefn = raw_write,
807 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
808 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
809 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
810 * For v6 and v5, these mappings are overly broad.
811 */
812 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
813 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
814 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
815 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
816 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
817 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
818 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
819 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
820 /* Cache maintenance ops; some of this space may be overridden later. */
821 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
822 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
823 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
824 REGINFO_SENTINEL
825 };
826
827 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
828 /* Not all pre-v6 cores implemented this WFI, so this is slightly
829 * over-broad.
830 */
831 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
832 .access = PL1_W, .type = ARM_CP_WFI },
833 REGINFO_SENTINEL
834 };
835
836 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
837 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
838 * is UNPREDICTABLE; we choose to NOP as most implementations do).
839 */
840 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
841 .access = PL1_W, .type = ARM_CP_WFI },
842 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
843 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
844 * OMAPCP will override this space.
845 */
846 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
847 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
848 .resetvalue = 0 },
849 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
850 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
851 .resetvalue = 0 },
852 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
853 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
854 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
855 .resetvalue = 0 },
856 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
857 * implementing it as RAZ means the "debug architecture version" bits
858 * will read as a reserved value, which should cause Linux to not try
859 * to use the debug hardware.
860 */
861 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
862 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
863 /* MMU TLB control. Note that the wildcarding means we cover not just
864 * the unified TLB ops but also the dside/iside/inner-shareable variants.
865 */
866 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
867 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
868 .type = ARM_CP_NO_RAW },
869 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
870 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
871 .type = ARM_CP_NO_RAW },
872 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
873 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
874 .type = ARM_CP_NO_RAW },
875 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
876 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
877 .type = ARM_CP_NO_RAW },
878 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
879 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
880 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
881 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
882 REGINFO_SENTINEL
883 };
884
885 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
886 uint64_t value)
887 {
888 uint32_t mask = 0;
889
890 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
891 if (!arm_feature(env, ARM_FEATURE_V8)) {
892 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
893 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
894 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
895 */
896 if (arm_feature(env, ARM_FEATURE_VFP)) {
897 /* VFP coprocessor: cp10 & cp11 [23:20] */
898 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
899
900 if (!arm_feature(env, ARM_FEATURE_NEON)) {
901 /* ASEDIS [31] bit is RAO/WI */
902 value |= (1 << 31);
903 }
904
905 /* VFPv3 and upwards with NEON implement 32 double precision
906 * registers (D0-D31).
907 */
908 if (!arm_feature(env, ARM_FEATURE_NEON) ||
909 !arm_feature(env, ARM_FEATURE_VFP3)) {
910 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
911 value |= (1 << 30);
912 }
913 }
914 value &= mask;
915 }
916
917 /*
918 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
919 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
920 */
921 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
922 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
923 value &= ~(0xf << 20);
924 value |= env->cp15.cpacr_el1 & (0xf << 20);
925 }
926
927 env->cp15.cpacr_el1 = value;
928 }
929
930 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
931 {
932 /*
933 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
934 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
935 */
936 uint64_t value = env->cp15.cpacr_el1;
937
938 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
939 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
940 value &= ~(0xf << 20);
941 }
942 return value;
943 }
944
945
946 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
947 {
948 /* Call cpacr_write() so that we reset with the correct RAO bits set
949 * for our CPU features.
950 */
951 cpacr_write(env, ri, 0);
952 }
953
954 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
955 bool isread)
956 {
957 if (arm_feature(env, ARM_FEATURE_V8)) {
958 /* Check if CPACR accesses are to be trapped to EL2 */
959 if (arm_current_el(env) == 1 &&
960 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
961 return CP_ACCESS_TRAP_EL2;
962 /* Check if CPACR accesses are to be trapped to EL3 */
963 } else if (arm_current_el(env) < 3 &&
964 (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
965 return CP_ACCESS_TRAP_EL3;
966 }
967 }
968
969 return CP_ACCESS_OK;
970 }
971
972 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
973 bool isread)
974 {
975 /* Check if CPTR accesses are set to trap to EL3 */
976 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
977 return CP_ACCESS_TRAP_EL3;
978 }
979
980 return CP_ACCESS_OK;
981 }
982
983 static const ARMCPRegInfo v6_cp_reginfo[] = {
984 /* prefetch by MVA in v6, NOP in v7 */
985 { .name = "MVA_prefetch",
986 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
987 .access = PL1_W, .type = ARM_CP_NOP },
988 /* We need to break the TB after ISB to execute self-modifying code
989 * correctly and also to take any pending interrupts immediately.
990 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
991 */
992 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
993 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
994 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
995 .access = PL0_W, .type = ARM_CP_NOP },
996 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
997 .access = PL0_W, .type = ARM_CP_NOP },
998 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
999 .access = PL1_RW,
1000 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1001 offsetof(CPUARMState, cp15.ifar_ns) },
1002 .resetvalue = 0, },
1003 /* Watchpoint Fault Address Register : should actually only be present
1004 * for 1136, 1176, 11MPCore.
1005 */
1006 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1007 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1008 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1009 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1010 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1011 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1012 REGINFO_SENTINEL
1013 };
1014
1015 /* Definitions for the PMU registers */
1016 #define PMCRN_MASK 0xf800
1017 #define PMCRN_SHIFT 11
1018 #define PMCRLC 0x40
1019 #define PMCRDP 0x20
1020 #define PMCRX 0x10
1021 #define PMCRD 0x8
1022 #define PMCRC 0x4
1023 #define PMCRP 0x2
1024 #define PMCRE 0x1
1025
1026 #define PMXEVTYPER_P 0x80000000
1027 #define PMXEVTYPER_U 0x40000000
1028 #define PMXEVTYPER_NSK 0x20000000
1029 #define PMXEVTYPER_NSU 0x10000000
1030 #define PMXEVTYPER_NSH 0x08000000
1031 #define PMXEVTYPER_M 0x04000000
1032 #define PMXEVTYPER_MT 0x02000000
1033 #define PMXEVTYPER_EVTCOUNT 0x0000ffff
1034 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1035 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1036 PMXEVTYPER_M | PMXEVTYPER_MT | \
1037 PMXEVTYPER_EVTCOUNT)
1038
1039 #define PMCCFILTR 0xf8000000
1040 #define PMCCFILTR_M PMXEVTYPER_M
1041 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
1042
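/* Number of event counters implemented, as reported by PMCR.N. */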
1043 static inline uint32_t pmu_num_counters(CPUARMState *env)
1044 {
1045 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1046 }
1047
1048 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
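/* For example, with PMCR.N == 4 the mask is 0x8000000f: bit 31 covers
 * PMCCNTR and bits [3:0] cover the four event counters.
 */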
1049 static inline uint64_t pmu_counter_mask(CPUARMState *env)
1050 {
1051 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1052 }
1053
1054 typedef struct pm_event {
1055 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1056 /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1057 bool (*supported)(CPUARMState *);
1058 /*
1059 * Retrieve the current count of the underlying event. The programmed
1060 * counters hold a difference from the return value from this function
1061 */
1062 uint64_t (*get_count)(CPUARMState *);
1063 /*
1064 * Return how many nanoseconds it will take (at a minimum) for count events
1065 * to occur. A negative value indicates the counter will never overflow, or
1066 * that the counter has otherwise arranged for the overflow bit to be set
1067 * and the PMU interrupt to be raised on overflow.
1068 */
1069 int64_t (*ns_per_count)(uint64_t);
1070 } pm_event;
1071
1072 static bool event_always_supported(CPUARMState *env)
1073 {
1074 return true;
1075 }
1076
1077 static uint64_t swinc_get_count(CPUARMState *env)
1078 {
1079 /*
1080 * SW_INCR events are written directly to the pmevcntr's by writes to
1081 * PMSWINC, so there is no underlying count maintained by the PMU itself
1082 */
1083 return 0;
1084 }
1085
1086 static int64_t swinc_ns_per(uint64_t ignored)
1087 {
1088 return -1;
1089 }
1090
1091 /*
1092 * Return the underlying cycle count for the PMU cycle counters. If we're in
1093 * usermode, simply return 0.
1094 */
1095 static uint64_t cycles_get_count(CPUARMState *env)
1096 {
1097 #ifndef CONFIG_USER_ONLY
1098 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1099 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1100 #else
1101 return cpu_get_host_ticks();
1102 #endif
1103 }
1104
1105 #ifndef CONFIG_USER_ONLY
1106 static int64_t cycles_ns_per(uint64_t cycles)
1107 {
1108 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1109 }
1110
1111 static bool instructions_supported(CPUARMState *env)
1112 {
1113 return use_icount == 1 /* Precise instruction counting */;
1114 }
1115
1116 static uint64_t instructions_get_count(CPUARMState *env)
1117 {
1118 return (uint64_t)cpu_get_icount_raw();
1119 }
1120
1121 static int64_t instructions_ns_per(uint64_t icount)
1122 {
1123 return cpu_icount_to_ns((int64_t)icount);
1124 }
1125 #endif
1126
1127 static const pm_event pm_events[] = {
1128 { .number = 0x000, /* SW_INCR */
1129 .supported = event_always_supported,
1130 .get_count = swinc_get_count,
1131 .ns_per_count = swinc_ns_per,
1132 },
1133 #ifndef CONFIG_USER_ONLY
1134 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1135 .supported = instructions_supported,
1136 .get_count = instructions_get_count,
1137 .ns_per_count = instructions_ns_per,
1138 },
1139 { .number = 0x011, /* CPU_CYCLES, Cycle */
1140 .supported = event_always_supported,
1141 .get_count = cycles_get_count,
1142 .ns_per_count = cycles_ns_per,
1143 }
1144 #endif
1145 };
1146
1147 /*
1148 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1149 * events (i.e. the statistical profiling extension), this implementation
1150 * should first be updated to something sparse instead of the current
1151 * supported_event_map[] array.
1152 */
1153 #define MAX_EVENT_ID 0x11
1154 #define UNSUPPORTED_EVENT UINT16_MAX
1155 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1156
1157 /*
1158 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1159 * of ARM event numbers to indices in our pm_events array.
1160 *
1161 * Note: Events in the 0x40XX range are not currently supported.
1162 */
1163 void pmu_init(ARMCPU *cpu)
1164 {
1165 unsigned int i;
1166
1167 /*
1168 * Empty supported_event_map and cpu->pmceid[01] before adding supported
1169 * events to them
1170 */
1171 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1172 supported_event_map[i] = UNSUPPORTED_EVENT;
1173 }
1174 cpu->pmceid0 = 0;
1175 cpu->pmceid1 = 0;
1176
1177 for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1178 const pm_event *cnt = &pm_events[i];
1179 assert(cnt->number <= MAX_EVENT_ID);
1180 /* We do not currently support events in the 0x40xx range */
1181 assert(cnt->number <= 0x3f);
1182
1183 if (cnt->supported(&cpu->env)) {
1184 supported_event_map[cnt->number] = i;
1185 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1186 if (cnt->number & 0x20) {
1187 cpu->pmceid1 |= event_mask;
1188 } else {
1189 cpu->pmceid0 |= event_mask;
1190 }
1191 }
1192 }
1193 }
1194
1195 /*
1196 * Check at runtime whether a PMU event is supported for the current machine
1197 */
1198 static bool event_supported(uint16_t number)
1199 {
1200 if (number > MAX_EVENT_ID) {
1201 return false;
1202 }
1203 return supported_event_map[number] != UNSUPPORTED_EVENT;
1204 }
1205
1206 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1207 bool isread)
1208 {
1209 /* Performance monitor registers user accessibility is controlled
1210 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1211 * trapping to EL2 or EL3 for other accesses.
1212 */
1213 int el = arm_current_el(env);
1214
1215 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1216 return CP_ACCESS_TRAP;
1217 }
1218 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1219 && !arm_is_secure_below_el3(env)) {
1220 return CP_ACCESS_TRAP_EL2;
1221 }
1222 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1223 return CP_ACCESS_TRAP_EL3;
1224 }
1225
1226 return CP_ACCESS_OK;
1227 }
1228
1229 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1230 const ARMCPRegInfo *ri,
1231 bool isread)
1232 {
1233 /* ER: event counter read trap control */
1234 if (arm_feature(env, ARM_FEATURE_V8)
1235 && arm_current_el(env) == 0
1236 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1237 && isread) {
1238 return CP_ACCESS_OK;
1239 }
1240
1241 return pmreg_access(env, ri, isread);
1242 }
1243
1244 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1245 const ARMCPRegInfo *ri,
1246 bool isread)
1247 {
1248 /* SW: software increment write trap control */
1249 if (arm_feature(env, ARM_FEATURE_V8)
1250 && arm_current_el(env) == 0
1251 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1252 && !isread) {
1253 return CP_ACCESS_OK;
1254 }
1255
1256 return pmreg_access(env, ri, isread);
1257 }
1258
1259 static CPAccessResult pmreg_access_selr(CPUARMState *env,
1260 const ARMCPRegInfo *ri,
1261 bool isread)
1262 {
1263 /* ER: event counter read trap control */
1264 if (arm_feature(env, ARM_FEATURE_V8)
1265 && arm_current_el(env) == 0
1266 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1267 return CP_ACCESS_OK;
1268 }
1269
1270 return pmreg_access(env, ri, isread);
1271 }
1272
1273 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1274 const ARMCPRegInfo *ri,
1275 bool isread)
1276 {
1277 /* CR: cycle counter read trap control */
1278 if (arm_feature(env, ARM_FEATURE_V8)
1279 && arm_current_el(env) == 0
1280 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1281 && isread) {
1282 return CP_ACCESS_OK;
1283 }
1284
1285 return pmreg_access(env, ri, isread);
1286 }
1287
1288 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1289 * the current EL, security state, and register configuration.
1290 */
1291 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1292 {
1293 uint64_t filter;
1294 bool e, p, u, nsk, nsu, nsh, m;
1295 bool enabled, prohibited, filtered;
1296 bool secure = arm_is_secure(env);
1297 int el = arm_current_el(env);
1298 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1299
1300 if (!arm_feature(env, ARM_FEATURE_PMU)) {
1301 return false;
1302 }
1303
1304 if (!arm_feature(env, ARM_FEATURE_EL2) ||
1305 (counter < hpmn || counter == 31)) {
1306 e = env->cp15.c9_pmcr & PMCRE;
1307 } else {
1308 e = env->cp15.mdcr_el2 & MDCR_HPME;
1309 }
1310 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1311
1312 if (!secure) {
1313 if (el == 2 && (counter < hpmn || counter == 31)) {
1314 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1315 } else {
1316 prohibited = false;
1317 }
1318 } else {
1319 prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1320 (env->cp15.mdcr_el3 & MDCR_SPME);
1321 }
1322
1323 if (prohibited && counter == 31) {
1324 prohibited = env->cp15.c9_pmcr & PMCRDP;
1325 }
1326
1327 if (counter == 31) {
1328 filter = env->cp15.pmccfiltr_el0;
1329 } else {
1330 filter = env->cp15.c14_pmevtyper[counter];
1331 }
1332
1333 p = filter & PMXEVTYPER_P;
1334 u = filter & PMXEVTYPER_U;
1335 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1336 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1337 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1338 m = arm_el_is_aa64(env, 1) &&
1339 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1340
1341 if (el == 0) {
1342 filtered = secure ? u : u != nsu;
1343 } else if (el == 1) {
1344 filtered = secure ? p : p != nsk;
1345 } else if (el == 2) {
1346 filtered = !nsh;
1347 } else { /* EL3 */
1348 filtered = m != p;
1349 }
1350
1351 if (counter != 31) {
1352 /*
1353 * If not checking PMCCNTR, ensure the counter is setup to an event we
1354 * support
1355 */
1356 uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1357 if (!event_supported(event)) {
1358 return false;
1359 }
1360 }
1361
1362 return enabled && !prohibited && !filtered;
1363 }
1364
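/* Recompute the PMU interrupt line: it is asserted when the PMU is enabled
 * (PMCR.E) and some counter both has its overflow flag set and has its
 * interrupt enabled.
 */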
1365 static void pmu_update_irq(CPUARMState *env)
1366 {
1367 ARMCPU *cpu = env_archcpu(env);
1368 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1369 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1370 }
1371
1372 /*
1373 * Ensure c15_ccnt is the guest-visible count so that operations such as
1374 * enabling/disabling the counter or filtering, modifying the count itself,
1375 * etc. can be done logically. This is essentially a no-op if the counter is
1376 * not enabled at the time of the call.
1377 */
1378 static void pmccntr_op_start(CPUARMState *env)
1379 {
1380 uint64_t cycles = cycles_get_count(env);
1381
1382 if (pmu_counter_enabled(env, 31)) {
1383 uint64_t eff_cycles = cycles;
1384 if (env->cp15.c9_pmcr & PMCRD) {
1385 /* Increment once every 64 processor clock cycles */
1386 eff_cycles /= 64;
1387 }
1388
1389 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1390
1391 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
1392 1ull << 63 : 1ull << 31;
1393 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1394 env->cp15.c9_pmovsr |= (1 << 31);
1395 pmu_update_irq(env);
1396 }
1397
1398 env->cp15.c15_ccnt = new_pmccntr;
1399 }
1400 env->cp15.c15_ccnt_delta = cycles;
1401 }
1402
1403 /*
1404 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1405 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1406 * pmccntr_op_start.
1407 */
1408 static void pmccntr_op_finish(CPUARMState *env)
1409 {
1410 if (pmu_counter_enabled(env, 31)) {
1411 #ifndef CONFIG_USER_ONLY
1412 /* Calculate when the counter will next overflow */
1413 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1414 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1415 remaining_cycles = (uint32_t)remaining_cycles;
1416 }
1417 int64_t overflow_in = cycles_ns_per(remaining_cycles);
1418
1419 if (overflow_in > 0) {
1420 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1421 overflow_in;
1422 ARMCPU *cpu = env_archcpu(env);
1423 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1424 }
1425 #endif
1426
1427 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1428 if (env->cp15.c9_pmcr & PMCRD) {
1429 /* Increment once every 64 processor clock cycles */
1430 prev_cycles /= 64;
1431 }
1432 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1433 }
1434 }
1435
1436 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1437 {
1438
1439 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1440 uint64_t count = 0;
1441 if (event_supported(event)) {
1442 uint16_t event_idx = supported_event_map[event];
1443 count = pm_events[event_idx].get_count(env);
1444 }
1445
1446 if (pmu_counter_enabled(env, counter)) {
1447 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1448
1449 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1450 env->cp15.c9_pmovsr |= (1 << counter);
1451 pmu_update_irq(env);
1452 }
1453 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1454 }
1455 env->cp15.c14_pmevcntr_delta[counter] = count;
1456 }
1457
1458 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1459 {
1460 if (pmu_counter_enabled(env, counter)) {
1461 #ifndef CONFIG_USER_ONLY
1462 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1463 uint16_t event_idx = supported_event_map[event];
1464 uint64_t delta = UINT32_MAX -
1465 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1466 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1467
1468 if (overflow_in > 0) {
1469 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1470 overflow_in;
1471 ARMCPU *cpu = env_archcpu(env);
1472 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1473 }
1474 #endif
1475
1476 env->cp15.c14_pmevcntr_delta[counter] -=
1477 env->cp15.c14_pmevcntr[counter];
1478 }
1479 }
1480
1481 void pmu_op_start(CPUARMState *env)
1482 {
1483 unsigned int i;
1484 pmccntr_op_start(env);
1485 for (i = 0; i < pmu_num_counters(env); i++) {
1486 pmevcntr_op_start(env, i);
1487 }
1488 }
1489
1490 void pmu_op_finish(CPUARMState *env)
1491 {
1492 unsigned int i;
1493 pmccntr_op_finish(env);
1494 for (i = 0; i < pmu_num_counters(env); i++) {
1495 pmevcntr_op_finish(env, i);
1496 }
1497 }
1498
1499 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1500 {
1501 pmu_op_start(&cpu->env);
1502 }
1503
1504 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1505 {
1506 pmu_op_finish(&cpu->env);
1507 }
1508
1509 void arm_pmu_timer_cb(void *opaque)
1510 {
1511 ARMCPU *cpu = opaque;
1512
1513 /*
1514 * Update all the counter values based on the current underlying counts,
1515 * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1516 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1517 * counter may expire.
1518 */
1519 pmu_op_start(&cpu->env);
1520 pmu_op_finish(&cpu->env);
1521 }
1522
1523 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1524 uint64_t value)
1525 {
1526 pmu_op_start(env);
1527
1528 if (value & PMCRC) {
1529 /* The counter has been reset */
1530 env->cp15.c15_ccnt = 0;
1531 }
1532
1533 if (value & PMCRP) {
1534 unsigned int i;
1535 for (i = 0; i < pmu_num_counters(env); i++) {
1536 env->cp15.c14_pmevcntr[i] = 0;
1537 }
1538 }
1539
1540 /* only the DP, X, D and E bits are writable */
1541 env->cp15.c9_pmcr &= ~0x39;
1542 env->cp15.c9_pmcr |= (value & 0x39);
1543
1544 pmu_op_finish(env);
1545 }
1546
1547 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1548 uint64_t value)
1549 {
1550 unsigned int i;
1551 for (i = 0; i < pmu_num_counters(env); i++) {
1552 /* Increment a counter's count iff: */
1553 if ((value & (1 << i)) && /* counter's bit is set */
1554 /* counter is enabled and not filtered */
1555 pmu_counter_enabled(env, i) &&
1556 /* counter is SW_INCR */
1557 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1558 pmevcntr_op_start(env, i);
1559
1560 /*
1561 * Detect if this write causes an overflow since we can't predict
1562 * PMSWINC overflows like we can for other events
1563 */
1564 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1565
1566 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1567 env->cp15.c9_pmovsr |= (1 << i);
1568 pmu_update_irq(env);
1569 }
1570
1571 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1572
1573 pmevcntr_op_finish(env, i);
1574 }
1575 }
1576 }
1577
1578 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1579 {
1580 uint64_t ret;
1581 pmccntr_op_start(env);
1582 ret = env->cp15.c15_ccnt;
1583 pmccntr_op_finish(env);
1584 return ret;
1585 }
1586
1587 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1588 uint64_t value)
1589 {
1590 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1591 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1592 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1593 * accessed.
1594 */
1595 env->cp15.c9_pmselr = value & 0x1f;
1596 }
1597
1598 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1599 uint64_t value)
1600 {
1601 pmccntr_op_start(env);
1602 env->cp15.c15_ccnt = value;
1603 pmccntr_op_finish(env);
1604 }
1605
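/* AArch32 view of PMCCNTR: only the low 32 bits are written; the high half
 * of the 64-bit counter is preserved.
 */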
1606 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1607 uint64_t value)
1608 {
1609 uint64_t cur_val = pmccntr_read(env, NULL);
1610
1611 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1612 }
1613
1614 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1615 uint64_t value)
1616 {
1617 pmccntr_op_start(env);
1618 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1619 pmccntr_op_finish(env);
1620 }
1621
1622 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1623 uint64_t value)
1624 {
1625 pmccntr_op_start(env);
1626 /* M is not accessible from AArch32 */
1627 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1628 (value & PMCCFILTR);
1629 pmccntr_op_finish(env);
1630 }
1631
1632 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1633 {
1634 /* M is not visible in AArch32 */
1635 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1636 }
1637
1638 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1639 uint64_t value)
1640 {
1641 value &= pmu_counter_mask(env);
1642 env->cp15.c9_pmcnten |= value;
1643 }
1644
1645 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1646 uint64_t value)
1647 {
1648 value &= pmu_counter_mask(env);
1649 env->cp15.c9_pmcnten &= ~value;
1650 }
1651
1652 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1653 uint64_t value)
1654 {
1655 value &= pmu_counter_mask(env);
1656 env->cp15.c9_pmovsr &= ~value;
1657 pmu_update_irq(env);
1658 }
1659
1660 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1661 uint64_t value)
1662 {
1663 value &= pmu_counter_mask(env);
1664 env->cp15.c9_pmovsr |= value;
1665 pmu_update_irq(env);
1666 }
1667
1668 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1669 uint64_t value, const uint8_t counter)
1670 {
1671 if (counter == 31) {
1672 pmccfiltr_write(env, ri, value);
1673 } else if (counter < pmu_num_counters(env)) {
1674 pmevcntr_op_start(env, counter);
1675
1676 /*
1677 * If this counter's event type is changing, store the current
1678 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1679 * pmevcntr_op_finish has the correct baseline when it converts back to
1680 * a delta.
1681 */
1682 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1683 PMXEVTYPER_EVTCOUNT;
1684 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1685 if (old_event != new_event) {
1686 uint64_t count = 0;
1687 if (event_supported(new_event)) {
1688 uint16_t event_idx = supported_event_map[new_event];
1689 count = pm_events[event_idx].get_count(env);
1690 }
1691 env->cp15.c14_pmevcntr_delta[counter] = count;
1692 }
1693
1694 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1695 pmevcntr_op_finish(env, counter);
1696 }
1697 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1698 * PMSELR value is equal to or greater than the number of implemented
1699 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1700 */
1701 }
1702
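/*
 * Rough statement of the counting scheme maintained here (see
 * pmevcntr_op_start/finish): while counter i is enabled and filtered
 * in, the guest-visible value is, approximately,
 *
 *   PMEVCNTR<i> == get_count(env) - c14_pmevcntr_delta[i]
 *
 * so when the event type changes, the delta must be re-based against
 * the new event's current underlying count, as done above.
 */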
1703 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1704 const uint8_t counter)
1705 {
1706 if (counter == 31) {
1707 return env->cp15.pmccfiltr_el0;
1708 } else if (counter < pmu_num_counters(env)) {
1709 return env->cp15.c14_pmevtyper[counter];
1710 } else {
1711 /*
1712 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1713 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1714 */
1715 return 0;
1716 }
1717 }
1718
1719 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1720 uint64_t value)
1721 {
1722 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1723 pmevtyper_write(env, ri, value, counter);
1724 }
1725
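/*
 * The register encoding packs the event counter number into CRm[1:0]
 * and opc2[2:0]. Worked example (illustrative): PMEVTYPER10_EL0 has
 * CRm = 0b1101 and opc2 = 0b010, so
 *
 *   counter = ((0b1101 & 3) << 3) | (0b010 & 7) = 8 + 2 = 10
 */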
1726 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1727 uint64_t value)
1728 {
1729 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1730 env->cp15.c14_pmevtyper[counter] = value;
1731
1732 /*
1733 * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1734 * pmu_op_finish calls when loading saved state for a migration. Because
1735 * we're potentially updating the type of event here, the value written to
1736 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1737 * different counter type. Therefore, we need to set this value to the
1738 * current count for the counter type we're writing so that pmu_op_finish
1739 * has the correct count for its calculation.
1740 */
1741 uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1742 if (event_supported(event)) {
1743 uint16_t event_idx = supported_event_map[event];
1744 env->cp15.c14_pmevcntr_delta[counter] =
1745 pm_events[event_idx].get_count(env);
1746 }
1747 }
1748
1749 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1750 {
1751 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1752 return pmevtyper_read(env, ri, counter);
1753 }
1754
1755 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1756 uint64_t value)
1757 {
1758 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1759 }
1760
1761 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1762 {
1763 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1764 }
1765
1766 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1767 uint64_t value, uint8_t counter)
1768 {
1769 if (counter < pmu_num_counters(env)) {
1770 pmevcntr_op_start(env, counter);
1771 env->cp15.c14_pmevcntr[counter] = value;
1772 pmevcntr_op_finish(env, counter);
1773 }
1774 /*
1775 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1776 * are CONSTRAINED UNPREDICTABLE.
1777 */
1778 }
1779
1780 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1781 uint8_t counter)
1782 {
1783 if (counter < pmu_num_counters(env)) {
1784 uint64_t ret;
1785 pmevcntr_op_start(env, counter);
1786 ret = env->cp15.c14_pmevcntr[counter];
1787 pmevcntr_op_finish(env, counter);
1788 return ret;
1789 } else {
1790 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1791 * are CONSTRAINED UNPREDICTABLE. */
1792 return 0;
1793 }
1794 }
1795
1796 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1797 uint64_t value)
1798 {
1799 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1800 pmevcntr_write(env, ri, value, counter);
1801 }
1802
1803 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1804 {
1805 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1806 return pmevcntr_read(env, ri, counter);
1807 }
1808
1809 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1810 uint64_t value)
1811 {
1812 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1813 assert(counter < pmu_num_counters(env));
1814 env->cp15.c14_pmevcntr[counter] = value;
1815 pmevcntr_write(env, ri, value, counter);
1816 }
1817
1818 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1819 {
1820 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1821 assert(counter < pmu_num_counters(env));
1822 return env->cp15.c14_pmevcntr[counter];
1823 }
1824
1825 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1826 uint64_t value)
1827 {
1828 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1829 }
1830
1831 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1832 {
1833 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1834 }
1835
1836 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1837 uint64_t value)
1838 {
1839 if (arm_feature(env, ARM_FEATURE_V8)) {
1840 env->cp15.c9_pmuserenr = value & 0xf;
1841 } else {
1842 env->cp15.c9_pmuserenr = value & 1;
1843 }
1844 }
1845
1846 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1847 uint64_t value)
1848 {
1849 /* Only bits for counters covered by pmu_counter_mask() can be changed */
1850 value &= pmu_counter_mask(env);
1851 env->cp15.c9_pminten |= value;
1852 pmu_update_irq(env);
1853 }
1854
1855 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1856 uint64_t value)
1857 {
1858 value &= pmu_counter_mask(env);
1859 env->cp15.c9_pminten &= ~value;
1860 pmu_update_irq(env);
1861 }
1862
1863 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1864 uint64_t value)
1865 {
1866 /* Note that even though the AArch64 view of this register has bits
1867 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1868 * architectural requirements for bits which are RES0 only in some
1869 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1870 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1871 */
1872 raw_write(env, ri, value & ~0x1FULL);
1873 }
1874
1875 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1876 {
1877 /* Begin with base v8.0 state. */
1878 uint32_t valid_mask = 0x3fff;
1879 ARMCPU *cpu = env_archcpu(env);
1880
1881 if (arm_el_is_aa64(env, 3)) {
1882 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
1883 valid_mask &= ~SCR_NET;
1884 } else {
1885 valid_mask &= ~(SCR_RW | SCR_ST);
1886 }
1887
1888 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1889 valid_mask &= ~SCR_HCE;
1890
1891 /* On ARMv7, SMD (or SCD as it is called in v7) is only
1892 * supported if EL2 exists. The bit is UNK/SBZP when
1893 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1894 * when EL2 is unavailable.
1895 * On ARMv8, this bit is always available.
1896 */
1897 if (arm_feature(env, ARM_FEATURE_V7) &&
1898 !arm_feature(env, ARM_FEATURE_V8)) {
1899 valid_mask &= ~SCR_SMD;
1900 }
1901 }
1902 if (cpu_isar_feature(aa64_lor, cpu)) {
1903 valid_mask |= SCR_TLOR;
1904 }
1905 if (cpu_isar_feature(aa64_pauth, cpu)) {
1906 valid_mask |= SCR_API | SCR_APK;
1907 }
1908
1909 /* Clear all-context RES0 bits. */
1910 value &= valid_mask;
1911 raw_write(env, ri, value);
1912 }
1913
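/*
 * Illustrative example of the masking above, assuming an AArch64 EL3
 * with EL2 but neither LOR nor PAuth: valid_mask starts as 0x3fff
 * (NS..TWE), loses SCR_NET, and a write of ~0ULL therefore stores
 * 0x3fbf with FW/AW already forced to 1; all higher RES0 bits read
 * back as zero.
 */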
1914 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1915 {
1916 ARMCPU *cpu = env_archcpu(env);
1917
1918 /* Acquire the CSSELR index from the bank corresponding to the
1919 * security state of this CCSIDR access.
1920 */
1921 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1922 ri->secure & ARM_CP_SECSTATE_S);
1923
1924 return cpu->ccsidr[index];
1925 }
1926
1927 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1928 uint64_t value)
1929 {
1930 raw_write(env, ri, value & 0xf);
1931 }
1932
1933 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1934 {
1935 CPUState *cs = env_cpu(env);
1936 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1937 uint64_t ret = 0;
1938 bool allow_virt = (arm_current_el(env) == 1 &&
1939 (!arm_is_secure_below_el3(env) ||
1940 (env->cp15.scr_el3 & SCR_EEL2)));
1941
1942 if (allow_virt && (hcr_el2 & HCR_IMO)) {
1943 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1944 ret |= CPSR_I;
1945 }
1946 } else {
1947 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1948 ret |= CPSR_I;
1949 }
1950 }
1951
1952 if (allow_virt && (hcr_el2 & HCR_FMO)) {
1953 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1954 ret |= CPSR_F;
1955 }
1956 } else {
1957 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1958 ret |= CPSR_F;
1959 }
1960 }
1961
1962 /* External aborts are not possible in QEMU so A bit is always clear */
1963 return ret;
1964 }
1965
1966 static const ARMCPRegInfo v7_cp_reginfo[] = {
1967 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1968 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1969 .access = PL1_W, .type = ARM_CP_NOP },
1970 /* Performance monitors are implementation defined in v7,
1971 * but with an ARM recommended set of registers, which we
1972 * follow.
1973 *
1974 * Performance registers fall into three categories:
1975 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1976 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1977 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1978 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1979 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1980 */
1981 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1982 .access = PL0_RW, .type = ARM_CP_ALIAS,
1983 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1984 .writefn = pmcntenset_write,
1985 .accessfn = pmreg_access,
1986 .raw_writefn = raw_write },
1987 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1988 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1989 .access = PL0_RW, .accessfn = pmreg_access,
1990 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1991 .writefn = pmcntenset_write, .raw_writefn = raw_write },
1992 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1993 .access = PL0_RW,
1994 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1995 .accessfn = pmreg_access,
1996 .writefn = pmcntenclr_write,
1997 .type = ARM_CP_ALIAS },
1998 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1999 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2000 .access = PL0_RW, .accessfn = pmreg_access,
2001 .type = ARM_CP_ALIAS,
2002 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2003 .writefn = pmcntenclr_write },
2004 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2005 .access = PL0_RW, .type = ARM_CP_IO,
2006 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2007 .accessfn = pmreg_access,
2008 .writefn = pmovsr_write,
2009 .raw_writefn = raw_write },
2010 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2011 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2012 .access = PL0_RW, .accessfn = pmreg_access,
2013 .type = ARM_CP_ALIAS | ARM_CP_IO,
2014 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2015 .writefn = pmovsr_write,
2016 .raw_writefn = raw_write },
2017 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2018 .access = PL0_W, .accessfn = pmreg_access_swinc,
2019 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2020 .writefn = pmswinc_write },
2021 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2022 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2023 .access = PL0_W, .accessfn = pmreg_access_swinc,
2024 .type = ARM_CP_NO_RAW | ARM_CP_IO,
2025 .writefn = pmswinc_write },
2026 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2027 .access = PL0_RW, .type = ARM_CP_ALIAS,
2028 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2029 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2030 .raw_writefn = raw_write},
2031 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2032 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2033 .access = PL0_RW, .accessfn = pmreg_access_selr,
2034 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2035 .writefn = pmselr_write, .raw_writefn = raw_write, },
2036 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2037 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2038 .readfn = pmccntr_read, .writefn = pmccntr_write32,
2039 .accessfn = pmreg_access_ccntr },
2040 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2041 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2042 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2043 .type = ARM_CP_IO,
2044 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2045 .readfn = pmccntr_read, .writefn = pmccntr_write,
2046 .raw_readfn = raw_read, .raw_writefn = raw_write, },
2047 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2048 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2049 .access = PL0_RW, .accessfn = pmreg_access,
2050 .type = ARM_CP_ALIAS | ARM_CP_IO,
2051 .resetvalue = 0, },
2052 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2053 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2054 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2055 .access = PL0_RW, .accessfn = pmreg_access,
2056 .type = ARM_CP_IO,
2057 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2058 .resetvalue = 0, },
2059 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2060 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2061 .accessfn = pmreg_access,
2062 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2063 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2064 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2065 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2066 .accessfn = pmreg_access,
2067 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2068 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2069 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2070 .accessfn = pmreg_access_xevcntr,
2071 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2072 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2073 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2074 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2075 .accessfn = pmreg_access_xevcntr,
2076 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2077 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2078 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2079 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2080 .resetvalue = 0,
2081 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2082 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2083 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2084 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2085 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2086 .resetvalue = 0,
2087 .writefn = pmuserenr_write, .raw_writefn = raw_write },
2088 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2089 .access = PL1_RW, .accessfn = access_tpm,
2090 .type = ARM_CP_ALIAS | ARM_CP_IO,
2091 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2092 .resetvalue = 0,
2093 .writefn = pmintenset_write, .raw_writefn = raw_write },
2094 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2095 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2096 .access = PL1_RW, .accessfn = access_tpm,
2097 .type = ARM_CP_IO,
2098 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2099 .writefn = pmintenset_write, .raw_writefn = raw_write,
2100 .resetvalue = 0x0 },
2101 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2102 .access = PL1_RW, .accessfn = access_tpm,
2103 .type = ARM_CP_ALIAS | ARM_CP_IO,
2104 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2105 .writefn = pmintenclr_write, },
2106 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2107 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2108 .access = PL1_RW, .accessfn = access_tpm,
2109 .type = ARM_CP_ALIAS | ARM_CP_IO,
2110 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2111 .writefn = pmintenclr_write },
2112 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2113 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2114 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2115 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2116 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2117 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
2118 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2119 offsetof(CPUARMState, cp15.csselr_ns) } },
2120 /* Auxiliary ID register: this actually has an IMPDEF value but for now
2121 * just RAZ for all cores.
2122 */
2123 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2124 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2125 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2126 /* Auxiliary fault status registers: these also are IMPDEF, and we
2127 * choose to RAZ/WI for all cores.
2128 */
2129 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2130 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2131 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2132 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2133 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2134 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2135 /* MAIR can just read-as-written because we don't implement caches
2136 * and so don't need to care about memory attributes.
2137 */
2138 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2139 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2140 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2141 .resetvalue = 0 },
2142 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2143 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2144 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2145 .resetvalue = 0 },
2146 /* For non-long-descriptor page tables these are PRRR and NMRR;
2147 * regardless they still act as reads-as-written for QEMU.
2148 */
2149 /* MAIR0/1 are defined separately from their 64-bit counterpart so that
2150 * each can be assigned the correct fieldoffset, with endianness handled
2151 * in the field definitions.
2152 */
2153 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2154 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
2155 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2156 offsetof(CPUARMState, cp15.mair0_ns) },
2157 .resetfn = arm_cp_reset_ignore },
2158 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2159 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
2160 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2161 offsetof(CPUARMState, cp15.mair1_ns) },
2162 .resetfn = arm_cp_reset_ignore },
2163 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2164 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2165 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2166 /* 32 bit ITLB invalidates */
2167 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2168 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2169 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2170 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2171 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2172 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2173 /* 32 bit DTLB invalidates */
2174 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2175 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2176 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2177 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2178 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2179 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2180 /* 32 bit TLB invalidates */
2181 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2182 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2183 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2184 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2185 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2186 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2187 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2188 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2189 REGINFO_SENTINEL
2190 };
2191
2192 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2193 /* 32 bit TLB invalidates, Inner Shareable */
2194 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2195 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
2196 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2197 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2198 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2199 .type = ARM_CP_NO_RAW, .access = PL1_W,
2200 .writefn = tlbiasid_is_write },
2201 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2202 .type = ARM_CP_NO_RAW, .access = PL1_W,
2203 .writefn = tlbimvaa_is_write },
2204 REGINFO_SENTINEL
2205 };
2206
2207 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2208 /* PMOVSSET is not implemented in v7 before v7ve */
2209 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2210 .access = PL0_RW, .accessfn = pmreg_access,
2211 .type = ARM_CP_ALIAS | ARM_CP_IO,
2212 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2213 .writefn = pmovsset_write,
2214 .raw_writefn = raw_write },
2215 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2216 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2217 .access = PL0_RW, .accessfn = pmreg_access,
2218 .type = ARM_CP_ALIAS | ARM_CP_IO,
2219 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2220 .writefn = pmovsset_write,
2221 .raw_writefn = raw_write },
2222 REGINFO_SENTINEL
2223 };
2224
2225 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2226 uint64_t value)
2227 {
2228 value &= 1;
2229 env->teecr = value;
2230 }
2231
2232 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2233 bool isread)
2234 {
2235 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2236 return CP_ACCESS_TRAP;
2237 }
2238 return CP_ACCESS_OK;
2239 }
2240
2241 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2242 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2243 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2244 .resetvalue = 0,
2245 .writefn = teecr_write },
2246 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2247 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2248 .accessfn = teehbr_access, .resetvalue = 0 },
2249 REGINFO_SENTINEL
2250 };
2251
2252 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2253 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2254 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2255 .access = PL0_RW,
2256 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2257 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2258 .access = PL0_RW,
2259 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2260 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2261 .resetfn = arm_cp_reset_ignore },
2262 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2263 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2264 .access = PL0_R|PL1_W,
2265 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2266 .resetvalue = 0},
2267 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2268 .access = PL0_R|PL1_W,
2269 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2270 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2271 .resetfn = arm_cp_reset_ignore },
2272 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2273 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2274 .access = PL1_RW,
2275 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2276 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2277 .access = PL1_RW,
2278 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2279 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2280 .resetvalue = 0 },
2281 REGINFO_SENTINEL
2282 };
2283
2284 #ifndef CONFIG_USER_ONLY
2285
2286 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2287 bool isread)
2288 {
2289 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2290 * Writable only at the highest implemented exception level.
2291 */
2292 int el = arm_current_el(env);
2293
2294 switch (el) {
2295 case 0:
2296 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
2297 return CP_ACCESS_TRAP;
2298 }
2299 break;
2300 case 1:
2301 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2302 arm_is_secure_below_el3(env)) {
2303 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2304 return CP_ACCESS_TRAP_UNCATEGORIZED;
2305 }
2306 break;
2307 case 2:
2308 case 3:
2309 break;
2310 }
2311
2312 if (!isread && el < arm_highest_el(env)) {
2313 return CP_ACCESS_TRAP_UNCATEGORIZED;
2314 }
2315
2316 return CP_ACCESS_OK;
2317 }
2318
2319 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2320 bool isread)
2321 {
2322 unsigned int cur_el = arm_current_el(env);
2323 bool secure = arm_is_secure(env);
2324
2325 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2326 if (cur_el == 0 &&
2327 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2328 return CP_ACCESS_TRAP;
2329 }
2330
2331 if (arm_feature(env, ARM_FEATURE_EL2) &&
2332 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2333 !extract32(env->cp15.cnthctl_el2, 0, 1)) {
2334 return CP_ACCESS_TRAP_EL2;
2335 }
2336 return CP_ACCESS_OK;
2337 }
2338
2339 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2340 bool isread)
2341 {
2342 unsigned int cur_el = arm_current_el(env);
2343 bool secure = arm_is_secure(env);
2344
2345 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
2346 * EL0[PV]TEN is zero.
2347 */
2348 if (cur_el == 0 &&
2349 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2350 return CP_ACCESS_TRAP;
2351 }
2352
2353 if (arm_feature(env, ARM_FEATURE_EL2) &&
2354 timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2355 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2356 return CP_ACCESS_TRAP_EL2;
2357 }
2358 return CP_ACCESS_OK;
2359 }
2360
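/*
 * CNTKCTL bit positions relied on above (a summary, per the
 * architecture): EL0PCTEN = bit 0, EL0VCTEN = bit 1, EL0VTEN = bit 8,
 * EL0PTEN = bit 9. So "timeridx" indexes the counter-enable bits
 * directly, while "9 - timeridx" maps GTIMER_PHYS (0) to bit 9 and
 * GTIMER_VIRT (1) to bit 8 for the timer-enable bits.
 */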
2361 static CPAccessResult gt_pct_access(CPUARMState *env,
2362 const ARMCPRegInfo *ri,
2363 bool isread)
2364 {
2365 return gt_counter_access(env, GTIMER_PHYS, isread);
2366 }
2367
2368 static CPAccessResult gt_vct_access(CPUARMState *env,
2369 const ARMCPRegInfo *ri,
2370 bool isread)
2371 {
2372 return gt_counter_access(env, GTIMER_VIRT, isread);
2373 }
2374
2375 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2376 bool isread)
2377 {
2378 return gt_timer_access(env, GTIMER_PHYS, isread);
2379 }
2380
2381 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2382 bool isread)
2383 {
2384 return gt_timer_access(env, GTIMER_VIRT, isread);
2385 }
2386
2387 static CPAccessResult gt_stimer_access(CPUARMState *env,
2388 const ARMCPRegInfo *ri,
2389 bool isread)
2390 {
2391 /* The AArch64 register view of the secure physical timer is
2392 * always accessible from EL3, and configurably accessible from
2393 * Secure EL1.
2394 */
2395 switch (arm_current_el(env)) {
2396 case 1:
2397 if (!arm_is_secure(env)) {
2398 return CP_ACCESS_TRAP;
2399 }
2400 if (!(env->cp15.scr_el3 & SCR_ST)) {
2401 return CP_ACCESS_TRAP_EL3;
2402 }
2403 return CP_ACCESS_OK;
2404 case 0:
2405 case 2:
2406 return CP_ACCESS_TRAP;
2407 case 3:
2408 return CP_ACCESS_OK;
2409 default:
2410 g_assert_not_reached();
2411 }
2412 }
2413
2414 static uint64_t gt_get_countervalue(CPUARMState *env)
2415 {
2416 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
2417 }
2418
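/*
 * GTIMER_SCALE is 16 (ns per tick) in QEMU, so the counter advances
 * at 1e9 / 16 = 62.5 MHz; this matches the CNTFRQ reset value of
 * (1000 * 1000 * 1000) / GTIMER_SCALE in the reginfo below.
 */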
2419 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2420 {
2421 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2422
2423 if (gt->ctl & 1) {
2424 /* Timer enabled: calculate and set current ISTATUS, irq, and
2425 * reset timer to when ISTATUS next has to change
2426 */
2427 uint64_t offset = timeridx == GTIMER_VIRT ?
2428 cpu->env.cp15.cntvoff_el2 : 0;
2429 uint64_t count = gt_get_countervalue(&cpu->env);
2430 /* Note that this must be unsigned 64 bit arithmetic: */
2431 int istatus = count - offset >= gt->cval;
2432 uint64_t nexttick;
2433 int irqstate;
2434
2435 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2436
2437 irqstate = (istatus && !(gt->ctl & 2));
2438 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2439
2440 if (istatus) {
2441 /* Next transition is when count rolls back over to zero */
2442 nexttick = UINT64_MAX;
2443 } else {
2444 /* Next transition is when we hit cval */
2445 nexttick = gt->cval + offset;
2446 }
2447 /* Note that the desired next expiry time might be beyond the
2448 * signed-64-bit range of a QEMUTimer -- in this case we just
2449 * set the timer for as far in the future as possible. When the
2450 * timer expires we will reset the timer for any remaining period.
2451 */
2452 if (nexttick > INT64_MAX / GTIMER_SCALE) {
2453 nexttick = INT64_MAX / GTIMER_SCALE;
2454 }
2455 timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
2456 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2457 } else {
2458 /* Timer disabled: ISTATUS and timer output always clear */
2459 gt->ctl &= ~4;
2460 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2461 timer_del(cpu->gt_timer[timeridx]);
2462 trace_arm_gt_recalc_disabled(timeridx);
2463 }
2464 }
2465
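/*
 * Worked example for the recalculation above (illustrative values):
 * with offset = 0, cval = 1000 and a current count of 900, ISTATUS is
 * 0 and the QEMUTimer is armed for tick 1000 (i.e. 1000 * GTIMER_SCALE
 * ns). Once the count reaches 1000, ISTATUS becomes 1; the enabled
 * condition only clears again when the count wraps past zero, so
 * nexttick is pushed out to UINT64_MAX and clamped to the QEMUTimer
 * range.
 */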
2466 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2467 int timeridx)
2468 {
2469 ARMCPU *cpu = env_archcpu(env);
2470
2471 timer_del(cpu->gt_timer[timeridx]);
2472 }
2473
2474 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2475 {
2476 return gt_get_countervalue(env);
2477 }
2478
2479 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2480 {
2481 return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
2482 }
2483
2484 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2485 int timeridx,
2486 uint64_t value)
2487 {
2488 trace_arm_gt_cval_write(timeridx, value);
2489 env->cp15.c14_timer[timeridx].cval = value;
2490 gt_recalc_timer(env_archcpu(env), timeridx);
2491 }
2492
2493 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2494 int timeridx)
2495 {
2496 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2497
2498 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2499 (gt_get_countervalue(env) - offset));
2500 }
2501
2502 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2503 int timeridx,
2504 uint64_t value)
2505 {
2506 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2507
2508 trace_arm_gt_tval_write(timeridx, value);
2509 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2510 sextract64(value, 0, 32);
2511 gt_recalc_timer(env_archcpu(env), timeridx);
2512 }
2513
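/*
 * TVAL is a signed 32-bit downcounter view of the timer: reading
 * returns cval - (count - offset) truncated to 32 bits, and writing
 * rebuilds cval from the current count. Illustrative values: with
 * count - offset = 500, writing TVAL = 100 sets cval = 600, after
 * which a TVAL read at the same instant returns 100.
 */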
2514 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2515 int timeridx,
2516 uint64_t value)
2517 {
2518 ARMCPU *cpu = env_archcpu(env);
2519 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2520
2521 trace_arm_gt_ctl_write(timeridx, value);
2522 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2523 if ((oldval ^ value) & 1) {
2524 /* Enable toggled */
2525 gt_recalc_timer(cpu, timeridx);
2526 } else if ((oldval ^ value) & 2) {
2527 /* IMASK toggled: don't need to recalculate,
2528 * just set the interrupt line based on ISTATUS
2529 */
2530 int irqstate = (oldval & 4) && !(value & 2);
2531
2532 trace_arm_gt_imask_toggle(timeridx, irqstate);
2533 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2534 }
2535 }
2536
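/*
 * CNT*_CTL bit layout used above: ENABLE = bit 0, IMASK = bit 1,
 * ISTATUS = bit 2 (read-only; managed by gt_recalc_timer). For
 * example, with ISTATUS already set, writing IMASK = 1 simply drops
 * the interrupt line, and clearing IMASK again raises it, with no
 * timer recalculation needed.
 */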
2537 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2538 {
2539 gt_timer_reset(env, ri, GTIMER_PHYS);
2540 }
2541
2542 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2543 uint64_t value)
2544 {
2545 gt_cval_write(env, ri, GTIMER_PHYS, value);
2546 }
2547
2548 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2549 {
2550 return gt_tval_read(env, ri, GTIMER_PHYS);
2551 }
2552
2553 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2554 uint64_t value)
2555 {
2556 gt_tval_write(env, ri, GTIMER_PHYS, value);
2557 }
2558
2559 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2560 uint64_t value)
2561 {
2562 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2563 }
2564
2565 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2566 {
2567 gt_timer_reset(env, ri, GTIMER_VIRT);
2568 }
2569
2570 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2571 uint64_t value)
2572 {
2573 gt_cval_write(env, ri, GTIMER_VIRT, value);
2574 }
2575
2576 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2577 {
2578 return gt_tval_read(env, ri, GTIMER_VIRT);
2579 }
2580
2581 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2582 uint64_t value)
2583 {
2584 gt_tval_write(env, ri, GTIMER_VIRT, value);
2585 }
2586
2587 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2588 uint64_t value)
2589 {
2590 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2591 }
2592
2593 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2594 uint64_t value)
2595 {
2596 ARMCPU *cpu = env_archcpu(env);
2597
2598 trace_arm_gt_cntvoff_write(value);
2599 raw_write(env, ri, value);
2600 gt_recalc_timer(cpu, GTIMER_VIRT);
2601 }
2602
2603 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2604 {
2605 gt_timer_reset(env, ri, GTIMER_HYP);
2606 }
2607
2608 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2609 uint64_t value)
2610 {
2611 gt_cval_write(env, ri, GTIMER_HYP, value);
2612 }
2613
2614 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2615 {
2616 return gt_tval_read(env, ri, GTIMER_HYP);
2617 }
2618
2619 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2620 uint64_t value)
2621 {
2622 gt_tval_write(env, ri, GTIMER_HYP, value);
2623 }
2624
2625 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2626 uint64_t value)
2627 {
2628 gt_ctl_write(env, ri, GTIMER_HYP, value);
2629 }
2630
2631 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2632 {
2633 gt_timer_reset(env, ri, GTIMER_SEC);
2634 }
2635
2636 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2637 uint64_t value)
2638 {
2639 gt_cval_write(env, ri, GTIMER_SEC, value);
2640 }
2641
2642 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2643 {
2644 return gt_tval_read(env, ri, GTIMER_SEC);
2645 }
2646
2647 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2648 uint64_t value)
2649 {
2650 gt_tval_write(env, ri, GTIMER_SEC, value);
2651 }
2652
2653 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2654 uint64_t value)
2655 {
2656 gt_ctl_write(env, ri, GTIMER_SEC, value);
2657 }
2658
2659 void arm_gt_ptimer_cb(void *opaque)
2660 {
2661 ARMCPU *cpu = opaque;
2662
2663 gt_recalc_timer(cpu, GTIMER_PHYS);
2664 }
2665
2666 void arm_gt_vtimer_cb(void *opaque)
2667 {
2668 ARMCPU *cpu = opaque;
2669
2670 gt_recalc_timer(cpu, GTIMER_VIRT);
2671 }
2672
2673 void arm_gt_htimer_cb(void *opaque)
2674 {
2675 ARMCPU *cpu = opaque;
2676
2677 gt_recalc_timer(cpu, GTIMER_HYP);
2678 }
2679
2680 void arm_gt_stimer_cb(void *opaque)
2681 {
2682 ARMCPU *cpu = opaque;
2683
2684 gt_recalc_timer(cpu, GTIMER_SEC);
2685 }
2686
2687 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2688 /* Note that CNTFRQ is purely reads-as-written for the benefit
2689 * of software; writing it doesn't actually change the timer frequency.
2690 * Our reset value matches the fixed frequency we implement the timer at.
2691 */
2692 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2693 .type = ARM_CP_ALIAS,
2694 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2695 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2696 },
2697 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2698 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2699 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2700 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2701 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
2702 },
2703 /* overall control: mostly access permissions */
2704 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2705 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2706 .access = PL1_RW,
2707 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2708 .resetvalue = 0,
2709 },
2710 /* per-timer control */
2711 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2712 .secure = ARM_CP_SECSTATE_NS,
2713 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2714 .accessfn = gt_ptimer_access,
2715 .fieldoffset = offsetoflow32(CPUARMState,
2716 cp15.c14_timer[GTIMER_PHYS].ctl),
2717 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2718 },
2719 { .name = "CNTP_CTL_S",
2720 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2721 .secure = ARM_CP_SECSTATE_S,
2722 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2723 .accessfn = gt_ptimer_access,
2724 .fieldoffset = offsetoflow32(CPUARMState,
2725 cp15.c14_timer[GTIMER_SEC].ctl),
2726 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2727 },
2728 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2729 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2730 .type = ARM_CP_IO, .access = PL0_RW,
2731 .accessfn = gt_ptimer_access,
2732 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2733 .resetvalue = 0,
2734 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2735 },
2736 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2737 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2738 .accessfn = gt_vtimer_access,
2739 .fieldoffset = offsetoflow32(CPUARMState,
2740 cp15.c14_timer[GTIMER_VIRT].ctl),
2741 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2742 },
2743 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2744 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2745 .type = ARM_CP_IO, .access = PL0_RW,
2746 .accessfn = gt_vtimer_access,
2747 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2748 .resetvalue = 0,
2749 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2750 },
2751 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2752 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2753 .secure = ARM_CP_SECSTATE_NS,
2754 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2755 .accessfn = gt_ptimer_access,
2756 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2757 },
2758 { .name = "CNTP_TVAL_S",
2759 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2760 .secure = ARM_CP_SECSTATE_S,
2761 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2762 .accessfn = gt_ptimer_access,
2763 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2764 },
2765 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2766 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2767 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2768 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2769 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2770 },
2771 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2772 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2773 .accessfn = gt_vtimer_access,
2774 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2775 },
2776 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2777 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2778 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2779 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2780 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2781 },
2782 /* The counter itself */
2783 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2784 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2785 .accessfn = gt_pct_access,
2786 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2787 },
2788 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2789 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2790 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2791 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2792 },
2793 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2794 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2795 .accessfn = gt_vct_access,
2796 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2797 },
2798 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2799 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2800 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2801 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2802 },
2803 /* Comparison value, indicating when the timer goes off */
2804 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2805 .secure = ARM_CP_SECSTATE_NS,
2806 .access = PL0_RW,
2807 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2808 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2809 .accessfn = gt_ptimer_access,
2810 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2811 },
2812 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2813 .secure = ARM_CP_SECSTATE_S,
2814 .access = PL0_RW,
2815 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2816 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2817 .accessfn = gt_ptimer_access,
2818 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2819 },
2820 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2821 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2822 .access = PL0_RW,
2823 .type = ARM_CP_IO,
2824 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2825 .resetvalue = 0, .accessfn = gt_ptimer_access,
2826 .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2827 },
2828 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2829 .access = PL0_RW,
2830 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2831 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2832 .accessfn = gt_vtimer_access,
2833 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2834 },
2835 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2836 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2837 .access = PL0_RW,
2838 .type = ARM_CP_IO,
2839 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2840 .resetvalue = 0, .accessfn = gt_vtimer_access,
2841 .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2842 },
2843 /* Secure timer -- this is actually restricted to only EL3
2844 * and configurably Secure-EL1 via the accessfn.
2845 */
2846 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2847 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2848 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2849 .accessfn = gt_stimer_access,
2850 .readfn = gt_sec_tval_read,
2851 .writefn = gt_sec_tval_write,
2852 .resetfn = gt_sec_timer_reset,
2853 },
2854 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2855 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2856 .type = ARM_CP_IO, .access = PL1_RW,
2857 .accessfn = gt_stimer_access,
2858 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2859 .resetvalue = 0,
2860 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2861 },
2862 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2863 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2864 .type = ARM_CP_IO, .access = PL1_RW,
2865 .accessfn = gt_stimer_access,
2866 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2867 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2868 },
2869 REGINFO_SENTINEL
2870 };
2871
2872 #else
2873
2874 /* In user-mode most of the generic timer registers are inaccessible;
2875 * however, modern kernels (4.12+) allow access to cntvct_el0.
2876 */
2877
2878 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2879 {
2880 /* Currently we have no support for QEMUTimer in linux-user so we
2881 * can't call gt_get_countervalue(env); instead we directly
2882 * call the lower-level functions.
2883 */
2884 return cpu_get_clock() / GTIMER_SCALE;
2885 }
2886
2887 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2888 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2889 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2890 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
2891 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2892 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
2893 },
2894 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2895 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2896 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2897 .readfn = gt_virt_cnt_read,
2898 },
2899 REGINFO_SENTINEL
2900 };
2901
2902 #endif
2903
2904 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2905 {
2906 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2907 raw_write(env, ri, value);
2908 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2909 raw_write(env, ri, value & 0xfffff6ff);
2910 } else {
2911 raw_write(env, ri, value & 0xfffff1ff);
2912 }
2913 }
2914
2915 #ifndef CONFIG_USER_ONLY
2916 /* get_phys_addr() isn't present for user-mode-only targets */
2917
2918 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2919 bool isread)
2920 {
2921 if (ri->opc2 & 4) {
2922 /* The ATS12NSO* operations must trap to EL3 if executed in
2923 * Secure EL1 (which can only happen if EL3 is AArch64).
2924 * They are simply UNDEF if executed from NS EL1.
2925 * They function normally from EL2 or EL3.
2926 */
2927 if (arm_current_el(env) == 1) {
2928 if (arm_is_secure_below_el3(env)) {
2929 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2930 }
2931 return CP_ACCESS_TRAP_UNCATEGORIZED;
2932 }
2933 }
2934 return CP_ACCESS_OK;
2935 }
2936
2937 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2938 MMUAccessType access_type, ARMMMUIdx mmu_idx)
2939 {
2940 hwaddr phys_addr;
2941 target_ulong page_size;
2942 int prot;
2943 bool ret;
2944 uint64_t par64;
2945 bool format64 = false;
2946 MemTxAttrs attrs = {};
2947 ARMMMUFaultInfo fi = {};
2948 ARMCacheAttrs cacheattrs = {};
2949
2950 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2951 &prot, &page_size, &fi, &cacheattrs);
2952
2953 if (ret) {
2954 /*
2955 * Some kinds of translation fault must cause exceptions rather
2956 * than being reported in the PAR.
2957 */
2958 int current_el = arm_current_el(env);
2959 int target_el;
2960 uint32_t syn, fsr, fsc;
2961 bool take_exc = false;
2962
2963 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
2964 && (mmu_idx == ARMMMUIdx_S1NSE1 || mmu_idx == ARMMMUIdx_S1NSE0)) {
2965 /*
2966 * Synchronous stage 2 fault on an access made as part of the
2967 * translation table walk for AT S1E0* or AT S1E1* insn
2968 * executed from NS EL1. If this is a synchronous external abort
2969 * and SCR_EL3.EA == 1, then we take a synchronous external abort
2970 * to EL3. Otherwise the fault is taken as an exception to EL2,
2971 * and HPFAR_EL2 holds the faulting IPA.
2972 */
2973 if (fi.type == ARMFault_SyncExternalOnWalk &&
2974 (env->cp15.scr_el3 & SCR_EA)) {
2975 target_el = 3;
2976 } else {
2977 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
2978 target_el = 2;
2979 }
2980 take_exc = true;
2981 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
2982 /*
2983 * Synchronous external aborts during a translation table walk
2984 * are taken as Data Abort exceptions.
2985 */
2986 if (fi.stage2) {
2987 if (current_el == 3) {
2988 target_el = 3;
2989 } else {
2990 target_el = 2;
2991 }
2992 } else {
2993 target_el = exception_target_el(env);
2994 }
2995 take_exc = true;
2996 }
2997
2998 if (take_exc) {
2999 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3000 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3001 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3002 fsr = arm_fi_to_lfsc(&fi);
3003 fsc = extract32(fsr, 0, 6);
3004 } else {
3005 fsr = arm_fi_to_sfsc(&fi);
3006 fsc = 0x3f;
3007 }
3008 /*
3009 * Report exception with ESR indicating a fault due to a
3010 * translation table walk for a cache maintenance instruction.
3011 */
3012 syn = syn_data_abort_no_iss(current_el == target_el,
3013 fi.ea, 1, fi.s1ptw, 1, fsc);
3014 env->exception.vaddress = value;
3015 env->exception.fsr = fsr;
3016 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3017 }
3018 }
3019
3020 if (is_a64(env)) {
3021 format64 = true;
3022 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3023 /*
3024 * ATS1Cxx:
3025 * * TTBCR.EAE determines whether the result is returned using the
3026 * 32-bit or the 64-bit PAR format
3027 * * Instructions executed in Hyp mode always use the 64-bit format
3028 *
3029 * ATS12NSOxx uses the 64-bit format if any of the following is true:
3030 * * The Non-secure TTBCR.EAE bit is set to 1
3031 * * The implementation includes EL2, and the value of HCR.VM is 1
3032 *
3033 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3034 *
3035 * ATS1Hx always uses the 64-bit format.
3036 */
3037 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3038
3039 if (arm_feature(env, ARM_FEATURE_EL2)) {
3040 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
3041 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3042 } else {
3043 format64 |= arm_current_el(env) == 2;
3044 }
3045 }
3046 }
3047
3048 if (format64) {
3049 /* Create a 64-bit PAR */
3050 par64 = (1 << 11); /* LPAE bit always set */
3051 if (!ret) {
3052 par64 |= phys_addr & ~0xfffULL;
3053 if (!attrs.secure) {
3054 par64 |= (1 << 9); /* NS */
3055 }
3056 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3057 par64 |= cacheattrs.shareability << 7; /* SH */
3058 } else {
3059 uint32_t fsr = arm_fi_to_lfsc(&fi);
3060
3061 par64 |= 1; /* F */
3062 par64 |= (fsr & 0x3f) << 1; /* FS */
3063 if (fi.stage2) {
3064 par64 |= (1 << 9); /* S */
3065 }
3066 if (fi.s1ptw) {
3067 par64 |= (1 << 8); /* PTW */
3068 }
3069 }
3070 } else {
3071 /* fsr is a DFSR/IFSR value for the short descriptor
3072 * translation table format (with WnR always clear).
3073 * Convert it to a 32-bit PAR.
3074 */
3075 if (!ret) {
3076 /* We do not set any attribute bits in the PAR */
3077 if (page_size == (1 << 24)
3078 && arm_feature(env, ARM_FEATURE_V7)) {
3079 par64 = (phys_addr & 0xff000000) | (1 << 1);
3080 } else {
3081 par64 = phys_addr & 0xfffff000;
3082 }
3083 if (!attrs.secure) {
3084 par64 |= (1 << 9); /* NS */
3085 }
3086 } else {
3087 uint32_t fsr = arm_fi_to_sfsc(&fi);
3088
3089 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3090 ((fsr & 0xf) << 1) | 1;
3091 }
3092 }
3093 return par64;
3094 }
3095
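/*
 * Illustrative sketch (not part of the build, and the helper name is
 * hypothetical): how a caller could pick apart a 64-bit PAR value produced
 * by do_ats_write() above. The bit positions mirror the fault-path
 * comments (F, FS, S, PTW).
 */
#if 0
static void par64_decode_example(uint64_t par64)
{
    if (par64 & 1) {                              /* F: translation failed */
        uint32_t fsc = extract64(par64, 1, 6);    /* FS: fault status code */
        bool stage2 = par64 & (1 << 9);           /* S: fault was stage 2 */
        bool on_walk = par64 & (1 << 8);          /* PTW: during S1 walk */
        qemu_log("AT failed: fsc 0x%x s2 %d ptw %d\n", fsc, stage2, on_walk);
    } else {
        hwaddr pa = par64 & ~0xfffULL;            /* output address */
        qemu_log("AT ok: PA 0x%" HWADDR_PRIx "\n", pa);
    }
}
#endif
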
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

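/*
 * For reference: these operations arrive as 32-bit MCRs on c7, c8; e.g.
 * ATS1CPR is "mcr p15, 0, <Rt>, c7, c8, 0" and ATS1CPW is the same with
 * opc2 = 1, which is why bit 0 of opc2 selects the access type above and
 * bits [2:1] select the translation regime.
 */
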
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

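/*
 * Worked example (a sketch, not exercised by the code): for val = 0x36,
 * i.e. two-bit AP fields 0b10, 0b01, 0b11 for regions 0..2,
 * extended_mpu_ap_bits() moves field k from bit 2k to bit 4k and returns
 * 0x312; simple_mpu_ap_bits(0x312) packs it back to 0x36. Over the low
 * 16 bits of input the two helpers are exact inverses of each other.
 */
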
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

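/*
 * Note the double indirection in the two helpers above: .fieldoffset for
 * DRBAR/DRSR/DRACR points at a uint32_t * member of CPUARMState, so
 * raw_ptr() yields the address of that pointer. Dereferencing it reaches
 * the dynamically allocated per-region array, which is then indexed by
 * the current RGNR value.
 */
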
static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

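/*
 * Worked example: with TTBCR.N = 2 the computation above gives
 * mask = 0xc0000000 (any VA with either top bit set goes to TTBR1) and
 * base_mask = 0xfffff000, i.e. the TTBR0 table base need only be 4K
 * aligned because the first-level table has shrunk from 16KB to 4KB.
 */
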
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

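/*
 * The extract64(..., 48, 16) comparison above works because the ASID
 * lives in the top bits of the 64-bit TTBR layouts ([63:48] for AArch64,
 * [55:48] for the AArch32 LPAE format): a write that leaves all of bits
 * [63:48] unchanged cannot have changed the ASID.
 */
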
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

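/*
 * Example encoding (a sketch): an MP-capable core at affinity 1.2 would
 * read back (1U << 31) | (1 << 8) | 2 == 0x80000102 here, with Aff1 in
 * bits [15:8] and Aff0 in bits [7:0] of mp_affinity.
 */
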
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

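/*
 * In the non-IS handler above, tlb_force_broadcast() covers the case
 * where HCR_EL2.FB requires TLB maintenance performed at NS EL1 to be
 * broadcast: the local operation is simply upgraded to its Inner
 * Shareable variant. The same pattern recurs in the VA-based ops below.
 */
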
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = env_cpu(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

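/*
 * On the VA-based TLBI ops the Xt payload carries VA[55:12] in bits
 * [43:0], so "sextract64(value << 12, 0, 56)" shifts the page address
 * back into place and sign-extends from bit 55: kernel (high-half)
 * addresses come back with bits [63:56] set, while user addresses stay
 * zero-extended. The IPA-based ops below use a 48-bit extract instead.
 */
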
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

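/*
 * DCZID_EL0 layout, for reference: BS in bits [3:0] is log2 of the DC ZVA
 * block size in 4-byte words (so dcz_blocksize == 4 reports 16 words, a
 * 64-byte block), and DZP in bit 4, set above, indicates that ZVA
 * accesses would currently trap.
 */
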
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
4548 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
4549 REGINFO_SENTINEL
4550 };

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4558 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4559 .type = ARM_CP_NO_RAW,
4560 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4561 .access = PL2_RW,
4562 .type = ARM_CP_CONST, .resetvalue = 0 },
4563 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4564 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4565 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4566 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4567 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4568 .access = PL2_RW,
4569 .type = ARM_CP_CONST, .resetvalue = 0 },
4570 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4571 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4572 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4573 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4574 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4575 .access = PL2_RW, .type = ARM_CP_CONST,
4576 .resetvalue = 0 },
4577 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4578 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4579 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4580 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4581 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4582 .access = PL2_RW, .type = ARM_CP_CONST,
4583 .resetvalue = 0 },
4584 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4585 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4586 .access = PL2_RW, .type = ARM_CP_CONST,
4587 .resetvalue = 0 },
4588 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4589 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4590 .access = PL2_RW, .type = ARM_CP_CONST,
4591 .resetvalue = 0 },
4592 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4593 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4594 .access = PL2_RW, .type = ARM_CP_CONST,
4595 .resetvalue = 0 },
4596 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4597 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4598 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4599 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
4600 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4601 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4602 .type = ARM_CP_CONST, .resetvalue = 0 },
4603 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4604 .cp = 15, .opc1 = 6, .crm = 2,
4605 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4606 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
4607 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4608 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4609 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4610 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4611 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4612 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4613 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4614 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4615 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4616 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4617 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4618 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4619 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4620 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4621 .resetvalue = 0 },
4622 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4623 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4624 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4625 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4626 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4627 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4628 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4629 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4630 .resetvalue = 0 },
4631 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4632 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4633 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4634 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4635 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4636 .resetvalue = 0 },
4637 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4638 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4639 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4640 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4641 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4642 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4643 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4644 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4645 .access = PL2_RW, .accessfn = access_tda,
4646 .type = ARM_CP_CONST, .resetvalue = 0 },
4647 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
4648 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4649 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4650 .type = ARM_CP_CONST, .resetvalue = 0 },
4651 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4652 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4653 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4654 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4655 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4656 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4657 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4658 .type = ARM_CP_CONST,
4659 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4660 .access = PL2_RW, .resetvalue = 0 },
4661 REGINFO_SENTINEL
4662 };
4663
4664 /* Ditto, but for registers which exist in ARMv8 but not v7 */
4665 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
4666 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4667 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4668 .access = PL2_RW,
4669 .type = ARM_CP_CONST, .resetvalue = 0 },
4670 REGINFO_SENTINEL
4671 };
4672
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage 1 and enables stage 2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
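    /*
     * Illustrative example: deposit64(hcr, 32, 32, v) replaces bits
     * [63:32] of hcr with v and preserves bits [31:0], e.g. an existing
     * low half of 0x8000003f combined with a new high half of 0x1
     * yields 0x000000018000003f.
     */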
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
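        /*
         * Illustrative example: if only TGE is set (E2H clear), the
         * effective value returned is TGE|FMO|IMO|AMO -- the physical
         * interrupt overrides behave as 1 while the trap and
         * virtual-interrupt bits cleared above read as 0.
         */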
    }

    return ret;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
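    /*
     * Illustrative example: with NSACR.CP10 == 0 in AArch32 Non-secure
     * state, writes leave bits [11:10] (TCP11/TCP10) unchanged in the
     * backing store, and reads return those bits as 0b11 regardless of
     * what the guest last wrote.
     */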
    return value;
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }
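    /*
     * Illustrative example: an AArch32 write of the key value 0xC5ACCE55
     * sets the OS lock; any other AArch32 value clears it. In AArch64
     * state only bit 0 of the written value is significant.
     */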

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap. If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN. */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2. Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3. Since EZ is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}

static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    end_len = start_len &= 0xf;
    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
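    /*
     * Illustrative example: if sve_vq_map has bits 0, 1 and 3 set
     * (vector lengths of 1, 2 and 4 quadwords) and start_len is 2,
     * bit 2 is clear, so find_last_bit() over bits [0, 2) returns 1:
     * the request is rounded down to the 2-quadword (256-bit) length.
     */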
    return end_len;
}

/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
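    /*
     * Illustrative example: with sve_max_vq == 4 (zcr_len starts at 3)
     * and ZCR_EL1 == 1, an EL0/EL1 access clamps zcr_len to 1, i.e. a
     * 256-bit vector length, before the supported-length check below.
     */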

    return sve_zcr_get_valid_len(cpu, zcr_len);
}

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an address that is only 4-aligned.
             * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
             */
5520 bas &= 0xf;
5521 }
5522 /* The BAS bits are supposed to be programmed to indicate a contiguous
5523 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
5524 * we fire for each byte in the word/doubleword addressed by the WVR.
5525 * We choose to ignore any non-zero bits after the first range of 1s.
5526 */
5527 basstart = ctz32(bas);
5528 len = cto32(bas >> basstart);
5529 wvr += basstart;
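        /*
         * Illustrative example: BAS == 0b00111100 gives basstart == 2 and
         * len == 4, so the watchpoint covers the four bytes at
         * wvr+2 .. wvr+5.
         */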
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;
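    /*
     * Illustrative example: a write of 0x0001000000000003 (bit 48 set,
     * bits [1:0] set) is stored as 0xffff000000000000: bits [63:49]
     * mirror bit 48 and the two low bits are cleared.
     */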

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));
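    /*
     * Illustrative example: a guest write of BAS == 0b0101 is stored as
     * BAS == 0b1111, since BAS[1] mirrors BAS[0] and BAS[3] mirrors
     * BAS[2]. This is what limits BAS to the four cases listed in
     * hw_breakpoint_update().
     */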

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
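    /*
     * Illustrative example: a BRPs field of 5 advertises six breakpoints,
     * which is why the definition loops below run over indices
     * 0 .. brps inclusive.
     */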

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
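        /* i.e. ID_PFR1.GIC (bits [31:28]) = 1: GIC sysregs implemented. */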
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
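        /* i.e. ID_AA64PFR0_EL1.GIC (bits [27:24]) = 1: sysreg interface. */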
    }
    return pfr0;
}

/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode. */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode. */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
5834
5835 #ifdef TARGET_AARCH64
access_pauth(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)5836 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5837 bool isread)
5838 {
5839 int el = arm_current_el(env);
5840
5841 if (el < 2 &&
5842 arm_feature(env, ARM_FEATURE_EL2) &&
5843 !(arm_hcr_el2_eff(env) & HCR_APK)) {
5844 return CP_ACCESS_TRAP_EL2;
5845 }
5846 if (el < 3 &&
5847 arm_feature(env, ARM_FEATURE_EL3) &&
5848 !(env->cp15.scr_el3 & SCR_APK)) {
5849 return CP_ACCESS_TRAP_EL3;
5850 }
5851 return CP_ACCESS_OK;
5852 }
5853
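/* The five 128-bit pointer-authentication keys (APDA, APDB, APGA, APIA,
 * APIB), each exposed as a LO/HI pair of 64-bit system registers.
 */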
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
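    /* QEMU stores Z inverted: the Z flag reads as set when env->ZF == 0,
     * so writing 1 here leaves all four flags clear.
     */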
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#endif

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

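    /* SCTLR_ELx.EnRCTX gates EL0 use of the CFP/DVP/CPP RCTX ops:
     * when it is clear, EL0 accesses trap.
     */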
    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
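        /* The 64-bit view resets with the implementer code from MIDR[31:24]
         * and the event counter count in PMCR.N (bits [15:11]).
         */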
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
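        /* PMEVCNTR<n> and PMEVTYPER<n> spread n across the encoding:
         * CRm = 0b10nn (counters) or 0b11nn (typers) with nn = n[4:3],
         * and opc2 = n[2:0]; hence the 8|, 12| and masking below.
         */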
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
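        /* PMUv3.1 (PERFMON >= 4) adds PMCEID2/3, the AArch32 views of
         * the high halves of the 64-bit PMCEID0/1_EL0 event maps.
         */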
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }

    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#endif

    /*
     * While all v8.0 cpus support aarch64, QEMU does have configurations
     * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
     * which will set ID_ISAR6.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_predinv, cpu)
        : cpu_isar_feature(aa32_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

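    /* Register counts below: 34 = 32 Vn + FPSR + FPCR; 51 = 32 Dn +
     * 16 Qn aliases + FPSID/FPSCR/FPEXC; 35 and 19 = 32 or 16 Dn plus
     * those three status registers.
     */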
    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

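            /* The AArch32 view of a STATE_BOTH register is the low half
             * of the 64-bit backing field; on a big-endian host that half
             * sits sizeof(uint32_t) further into the struct.
             */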
#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}


define_one_arm_cp_reg_with_opaque(ARMCPU * cpu,const ARMCPRegInfo * r,void * opaque)7218 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
7219 const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are fewer than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
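    /*
     * Illustrative sketch only (not a definition from this file): a
     * wildcarded entry and its terminator might look like
     *
     *   static const ARMCPRegInfo example_regs[] = {
     *       { .name = "EXAMPLE", .cp = 15, .crn = 9, .crm = CP_ANY,
     *         .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
     *         .type = ARM_CP_CONST, .resetvalue = 0 },
     *       REGINFO_SENTINEL
     *   };
     *
     * where "EXAMPLE" and its encoding are made up. The CP_ANY fields make
     * the loops below add one hashtable entry per crm/opc1/opc2 combination,
     * with all but the first marked ARM_CP_ALIAS | ARM_CP_NO_GDB.
     */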
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data-driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
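
/*
 * Illustrative sketch only: a caller might expose a read-only ID register
 * to user-mode emulation along these lines (the names and masks here are
 * hypothetical, not taken from this file):
 *
 *   static const ARMCPRegUserSpaceInfo example_users[] = {
 *       { .name = "MIDR_EL1", .exported_bits = 0x00000000ffffffff },
 *       { .name = "ID_AA64*", .is_glob = true },
 *       { .name = NULL }
 *   };
 *   modify_arm_cp_regs(some_reginfo_list, example_users);
 *
 * Per the loop above, a glob entry zeroes every matching register's reset
 * value, while an exact-name entry keeps only the exported_bits of the
 * reset value and ORs in any fixed_bits.
 */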

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
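
/*
 * Sketch of how the three helpers above are typically wired into a
 * reginfo entry (illustrative only; "DUMMY" and its encoding are made up):
 *
 *   { .name = "DUMMY", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
 *     .access = PL1_RW, .readfn = arm_cp_read_zero,
 *     .writefn = arm_cp_write_ignore, .resetfn = arm_cp_reset_ignore },
 *
 * i.e. the register reads as zero, ignores writes and does nothing on reset.
 */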

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                  From
 *        BIT IRQ     IMO      Non-secure          Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
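
/*
 * Worked example of the lookup below: a physical IRQ taken from Secure EL0
 * with a 64-bit EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0
 * indexes target_el_table[1][0][1][0][1][0], i.e. the Secure EL0 column of
 * the "1 0 1 0" row above, giving a target of EL1.
 */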

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and LR registers.  Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
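
/*
 * For reference, the fixed mapping used by the two sync functions above
 * (restating the code, not adding to it):
 *
 *   x0-x7   <-> r0-r7
 *   x8-x12  <-> r8-r12 of the user/system view (FIQ keeps its own bank)
 *   x13/x14 <-> SP_usr/LR_usr      x15     <-> SP_hyp
 *   x16/x17 <-> LR_irq/SP_irq      x18/x19 <-> LR_svc/SP_svc
 *   x20/x21 <-> LR_abt/SP_abt      x22/x23 <-> LR_und/SP_und
 *   x24-x30 <-> r8_fiq-r14_fiq
 *
 * Hyp mode has no banked LR of its own; it shares LR_usr.
 */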

static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
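
/*
 * Worked example (illustrative): an IRQ taken from User mode with
 * SCR.IRQ = 0, SCTLR.V = 0 and VBAR = 0x8000 follows the EXCP_IRQ arm
 * above: new_mode is IRQ mode, CPSR.A and CPSR.I become masked, SPSR_irq
 * receives the old CPSR, LR_irq = pc + 4, and execution resumes at
 * 0x8000 + 0x18.
 */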

/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
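
/*
 * Worked example (illustrative): an IRQ routed from AArch64 EL0 to EL1
 * vectors to VBAR_EL1 + 0x400 + 0x80 (lower EL using AArch64, IRQ offset),
 * while the same IRQ taken at EL1 with SPSel = 1 uses
 * VBAR_EL1 + 0x200 + 0x80.
 */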

/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not trapped
 * to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
    }
}
#endif

/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
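
/*
 * Worked example (illustrative, assuming the usual short-descriptor TTBCR
 * semantics): with TTBCR.N = 2, tcr->mask covers the top two VA bits, so a
 * lookup of 0xc0001000 takes the TTBR1 path above (base masked to 16KB),
 * while 0x00001000 falls through to TTBR0 with tcr->base_mask applied; in
 * both cases the l1 index comes from VA bits [31:20] via
 * "(address >> 18) & 0x3ffc".
 */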

/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
9031
get_phys_addr_v5(CPUARMState * env,uint32_t address,MMUAccessType access_type,ARMMMUIdx mmu_idx,hwaddr * phys_ptr,int * prot,target_ulong * page_size,ARMMMUFaultInfo * fi)9032 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
9033 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9034 hwaddr *phys_ptr, int *prot,
9035 target_ulong *page_size,
9036 ARMMMUFaultInfo *fi)
9037 {
9038 CPUState *cs = env_cpu(env);
9039 int level = 1;
9040 uint32_t table;
9041 uint32_t desc;
9042 int type;
9043 int ap;
9044 int domain = 0;
9045 int domain_prot;
9046 hwaddr phys_addr;
9047 uint32_t dacr;
9048
9049 /* Pagetable walk. */
9050 /* Lookup l1 descriptor. */
9051 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
9052 /* Section translation fault if page walk is disabled by PD0 or PD1 */
9053 fi->type = ARMFault_Translation;
9054 goto do_fault;
9055 }
9056 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9057 mmu_idx, fi);
9058 if (fi->type != ARMFault_None) {
9059 goto do_fault;
9060 }
9061 type = (desc & 3);
9062 domain = (desc >> 5) & 0x0f;
9063 if (regime_el(env, mmu_idx) == 1) {
9064 dacr = env->cp15.dacr_ns;
9065 } else {
9066 dacr = env->cp15.dacr_s;
9067 }
9068 domain_prot = (dacr >> (domain * 2)) & 3;
9069 if (type == 0) {
9070 /* Section translation fault. */
9071 fi->type = ARMFault_Translation;
9072 goto do_fault;
9073 }
9074 if (type != 2) {
9075 level = 2;
9076 }
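    /* DACR grants: 0b00 and 0b10 fault, 0b01 is "client" (the permission
     * bits are checked), 0b11 is "manager" (permission bits are ignored).
     */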
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
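    /* The v5 short descriptors have no XN bit: any mapping with at least
     * read permission is also executable.
     */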
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
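            /* Supersections are 16MB and carry extended physical address
             * bits: descriptor bits [23:20] supply PA[35:32] and bits
             * [8:5] supply PA[39:36].
             */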
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }
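    /* For example, with 4KB granules (stride 9, grainsize 12), level 1
     * and a 40-bit inputsize: 40 - (2 * 9 + 12) = 10, which lies within
     * the permitted range [1, 13].
     */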

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

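    /* For example, s2attrs 0xf (Normal, Outer and Inner Write-Back) with
     * caches enabled becomes 0xff: Write-Back with RW-allocate hints in
     * both nibbles.
     */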
    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
#endif /* !CONFIG_USER_ONLY */

ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}

#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
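    /*
     * sextract64() of the top bits yields 0 if they are all zeroes and
     * -1 if they are all ones, so "-top_bits != param.select" catches
     * both a mixed-bit pattern and a mismatch with the chosen region.
     */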
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
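        /* For example, a 39-bit inputsize with 4KB granules (stride 9)
         * gives level = 4 - (39 - 4) / 9 = 1, i.e. a three-level walk
         * through levels 1, 2 and 3.
         */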
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
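    /* For example, with 4KB granules, inputsize 39 and start level 1:
     * indexmask_grainsize is 0xfff (a full 512-entry, 4KB table) and
     * indexmask is also 0xfff, capturing VA bits [38:27]; the low three
     * bits are cleared in the walk loop below, so the table offset is
     * IA[38:30] * 8.
     */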

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
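        /* With 4KB granules a level-3 page covers 1 << 12 bytes, a
         * level-2 block 1 << 21 (2MB) and a level-1 block 1 << 30 (1GB).
         */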
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
                | (extract64(descriptor, 52, 12) << 10);
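        /* attrs[9:0] holds descriptor bits [11:2] (the lower attributes)
         * and attrs[21:10] holds bits [63:52] (the upper attributes);
         * e.g. AF is attrs[8], AP[2:1] is attrs[5:4], XN is attrs[12].
         */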

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}

static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
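                /* SAU regions have 32-byte granularity: RBAR/RLAR hold
                 * address bits [31:5], so base rounds down and limit
                 * rounds up to a 32-byte boundary.
                 */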
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}


static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
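        /* The size field (bits [5:1]) encodes a region of 2^(field + 1)
         * bytes, so the offset mask below ends up as (1 << (field + 1)) - 1.
         */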
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
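    /* For example, S1 0xf (write-back, RW-allocate) combined with
     * S2 0xa (write-through, read-allocate) yields 0xb: write-through
     * from stage 2 with the allocation hints from stage 1.
     */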
10505 if (s1 == 4 || s2 == 4) {
10506 /* non-cacheable has precedence */
10507 return 4;
10508 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
10509 /* stage 1 write-through takes precedence */
10510 return s1;
10511 } else if (extract32(s2, 2, 2) == 2) {
10512 /* stage 2 write-through takes precedence, but the allocation hint
10513 * is still taken from stage 1
10514 */
10515 return (2 << 2) | extract32(s1, 0, 2);
10516 } else { /* write-back */
10517 return s1;
10518 }
10519 }
10520
10521 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
10522 * and CombineS1S2Desc()
10523 *
10524 * @s1: Attributes from stage 1 walk
10525 * @s2: Attributes from stage 2 walk
10526 */
combine_cacheattrs(ARMCacheAttrs s1,ARMCacheAttrs s2)10527 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
10528 {
10529 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
10530 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
10531 ARMCacheAttrs ret;
10532
10533 /* Combine shareability attributes (table D4-43) */
10534 if (s1.shareability == 2 || s2.shareability == 2) {
10535 /* if either are outer-shareable, the result is outer-shareable */
10536 ret.shareability = 2;
10537 } else if (s1.shareability == 3 || s2.shareability == 3) {
10538 /* if either are inner-shareable, the result is inner-shareable */
10539 ret.shareability = 3;
10540 } else {
10541 /* both non-shareable */
10542 ret.shareability = 0;
10543 }
10544
10545 /* Combine memory type and cacheability attributes */
10546 if (s1hi == 0 || s2hi == 0) {
10547 /* Device has precedence over normal */
10548 if (s1lo == 0 || s2lo == 0) {
10549 /* nGnRnE has precedence over anything */
10550 ret.attrs = 0;
10551 } else if (s1lo == 4 || s2lo == 4) {
10552 /* non-Reordering has precedence over Reordering */
10553 ret.attrs = 4; /* nGnRE */
10554 } else if (s1lo == 8 || s2lo == 8) {
10555 /* non-Gathering has precedence over Gathering */
10556 ret.attrs = 8; /* nGRE */
10557 } else {
10558 ret.attrs = 0xc; /* GRE */
10559 }
10560
10561 /* Any location for which the resultant memory type is any
10562 * type of Device memory is always treated as Outer Shareable.
10563 */
10564 ret.shareability = 2;
10565 } else { /* Normal memory */
10566 /* Outer/inner cacheability combine independently */
10567 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
10568 | combine_cacheattr_nibble(s1lo, s2lo);
10569
10570 if (ret.attrs == 0x44) {
10571 /* Any location for which the resultant memory type is Normal
10572 * Inner Non-cacheable, Outer Non-cacheable is always treated
10573 * as Outer Shareable.
10574 */
10575 ret.shareability = 2;
10576 }
10577 }
10578
10579 return ret;
10580 }
10581
10582
10583 /* get_phys_addr - get the physical address for this virtual address
10584 *
10585 * Find the physical address corresponding to the given virtual address,
10586 * by doing a translation table walk on MMU based systems or using the
10587 * MPU state on MPU based systems.
10588 *
10589 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10590 * prot and page_size may not be filled in, and the populated fsr value provides
10591 * information on why the translation aborted, in the format of a
10592 * DFSR/IFSR fault register, with the following caveats:
10593 * * we honour the short vs long DFSR format differences.
10594 * * the WnR bit is never set (the caller must do this).
10595 * * for PSMAv5 based systems we don't bother to return a full FSR format
10596 * value.
10597 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
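
/*
 * For illustration only (not a caller that exists in this file): a minimal
 * sketch of driving get_phys_addr() for a stage-1+2 data read, assuming the
 * caller already holds a CPUARMState. Note the convention that a false
 * return means success and true means a fault described by 'fi':
 *
 *     hwaddr phys;
 *     target_ulong page_size;
 *     int prot;
 *     MemTxAttrs attrs = {};
 *     ARMMMUFaultInfo fi = {};
 *
 *     if (!get_phys_addr(env, vaddr, MMU_DATA_LOAD, arm_mmu_idx(env),
 *                        &phys, &attrs, &prot, &page_size, &fi, NULL)) {
 *         // translation succeeded: phys, prot and page_size are valid
 *     }
 */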

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
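
/*
 * Note: returning -1 follows the generic get_phys_page_attrs_debug
 * contract; gdbstub and monitor callers treat -1 as "no valid translation"
 * rather than as a physical address.
 */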

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}
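
/*
 * Worked example: add16_sat(0x7ffe, 0x0003) wraps to 0x8001, but the sign
 * of the result differs from the sign of both (positive) operands, so it
 * saturates to 0x7fff. Likewise add16_sat(0x8001, 0xfffe) (-32767 + -2)
 * saturates to 0x8000. Overflow is only possible when the operands share
 * a sign, which is what the !((a ^ b) & 0x8000) test checks.
 */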

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
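
/*
 * op_addsub.h is a multiple-inclusion template: each variant below defines
 * ADD16/SUB16/ADD8/SUB8 and PFX (and optionally ARITH_GE) before including
 * it, and the header undefines them so the next variant can redefine them.
 * As a rough sketch (not the header's literal contents), each inclusion
 * stamps out helpers along these lines:
 *
 *     uint32_t HELPER(glue(PFX, add16))(uint32_t a, uint32_t b)
 *     {
 *         uint32_t res = 0;
 *         ADD16(a, b, 0);             // low halfword, via RESULT(..., 0, 16)
 *         ADD16(a >> 16, b >> 16, 1); // high halfword
 *         return res;
 *     }
 *
 * so this first inclusion (PFX q) generates the signed saturating
 * QADD16/QSUB16/QADD8/QSUB8 helpers.
 */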

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
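
/*
 * The 'ge' bits accumulated by the s- and u-prefixed variants become the
 * CPSR.GE flags that SEL consumes: signed ops set GE when the result is
 * non-negative, unsigned adds set it on carry-out, and unsigned subs set
 * it when no borrow occurred. Example: UADD16 of 0xffff and 0x0001 in the
 * low halfword carries out, so GE[1:0] = 0b11.
 */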

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
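
/*
 * Example: usad8(0x01020304, 0x04030201) = |4-1| + |3-2| + |2-3| + |1-4|
 * = 3 + 1 + 1 + 3 = 8, working from the least significant byte upward.
 */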

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
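
/*
 * Example: sel_flags(0b0101, a, b) builds mask = 0x00ff00ff, taking bytes
 * 0 and 2 from a and bytes 1 and 3 from b; with a = 0xaabbccdd and
 * b = 0x11223344 the result is 0x11bb33dd.
 */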

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
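
/*
 * These back the CRC32B/H/W and CRC32CB/H/W instructions: the plain
 * variant uses zlib's IEEE 802.3 polynomial and the 'c' variant the
 * Castagnoli polynomial, with 'bytes' being 1, 2 or 4 per the operand
 * size.
 */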

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
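
/*
 * Example: on a non-secure v8A core at EL0 with CPACR_EL1.FPEN == 0b01,
 * the switch above returns 1, so FP insns trap to EL1; with FPEN == 0b11
 * and no NSACR or CPTR traps applicable, the function returns 0 and FP
 * executes normally.
 */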

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    } else {
        return ARMMMUIdx_S12NSE0 + el;
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}

static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    /* v8M always enables the fpu.  */
    flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}

static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }
    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* FIXME: ARMv8.1-VHE S2 translation regime.  */
    if (regime_el(env, stage1) < 2) {
        ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
        tbid = (p1.tbi << 1) | p0.tbi;
        tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
    } else {
        tbid = p0.tbi;
        tbii = tbid & !p0.tbid;
    }

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }

    sctlr = arm_sctlr(env, el);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
#ifdef CONFIG_DEBUG_TCG
    assert(flags == rebuild_hflags_internal(env));
#endif

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     *  SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
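
/*
 * Worked example: narrowing to vq == 5 keeps 5 * 16 = 80 predicate bits.
 * The loop starts at j == vq / 4 == 1, and since vq & 3 == 1 the first
 * partially-live word is masked with pmask = ~(-1ULL << 16) = 0xffff,
 * keeping bits 64..79; all later words are cleared outright.
 */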

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
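
/*
 * Example: an exception return from aa64 EL2 with an effective vq of 4 to
 * aa64 EL1 with an effective vq of 1 gives old_len = 3 and new_len = 0
 * (LEN-style values, i.e. vq - 1), so we narrow to vq == 1 and zero
 * everything above the low 128 bits of each zreg.
 */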
#endif