xref: /qemu/target/arm/helper.c (revision 20daa90a)
#include "qemu/osdep.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          int access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
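
/* Illustrative sketch (not part of this file): the CPU initialisation code
 * is expected to wire these accessors into the gdbstub along the lines of
 * the call below. The register count and XML file name depend on the
 * configured features and are assumptions here, not a definitive list:
 *
 *     gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
 *                              51, "arm-neon.xml", 0);
 *
 * i.e. 32 D regs + 16 Q aliases + FPSID/FPSCR/FPEXC = 51 registers when
 * NEON is present.
 */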

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
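
/* For example, a regdef whose writefn has side effects and which has no
 * underlying state, like the TLB invalidate ops later in this file, must
 * carry ARM_CP_NO_RAW. A minimal sketch of such an entry, other fields
 * elided:
 *
 *     { .name = "TLBIALL", ..., .access = PL1_W,
 *       .writefn = tlbiall_write, .type = ARM_CP_NO_RAW },
 *
 * Without ARM_CP_NO_RAW, raw_accessors_invalid() would return true for it:
 * it has a writefn but no readfn, no raw accessors and no fieldoffset.
 */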

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
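
/* A rough sketch of how migration is expected to use this pair; the real
 * hooks live in the ARM vmstate code, and the shapes below are illustrative
 * rather than a definitive interface:
 *
 *     static void cpu_pre_save(void *opaque)
 *     {
 *         write_cpustate_to_list(cpu);   // sync env -> (index,value) list
 *     }
 *
 *     static int cpu_post_load(void *opaque, int version_id)
 *     {
 *         if (!write_list_to_cpustate(cpu)) {  // sync list -> env, verify
 *             return -1;                       // reject bad incoming state
 *         }
 *         return 0;
 *     }
 */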

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
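
/* Note on the two-pass flow above: count_cpreg() first sizes the arrays,
 * then add_cpreg_to_list() re-runs the same NO_RAW/ALIAS filter while
 * storing the sorted indexes, and the final assert checks that both passes
 * agreed. This is expected to run once per CPU at realize time, after all
 * register definitions have been added to cp_regs (an assumption based on
 * how the rest of the target code uses it, not something shown here).
 */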

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
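
/* A minimal sketch of how one of these access checkers is attached to a
 * register definition (mirroring the PMU regdefs later in this file); the
 * accessfn is consulted before any readfn/writefn runs, and its result
 * decides between completing the access and taking a trap:
 *
 *     { .name = "PMINTENSET", ...,
 *       .access = PL1_RW, .accessfn = access_tpm,
 *       .writefn = pmintenset_write, .raw_writefn = raw_write },
 */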

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware, the QEMU TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs);
    }
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs);
    }
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                        ARMMMUIdx_S2NS, -1);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
                            ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
    }
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must be a NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *other_cs;
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    CPU_FOREACH(other_cs) {
        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
    }
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
    }
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *other_cs;
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    CPU_FOREACH(other_cs) {
        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
    }
}
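
/* Worked example of the masking above: MAKE_64BIT_MASK(0, 12) is 0xfff,
 * so "value & ~MAKE_64BIT_MASK(0, 12)" keeps bits [63:12], i.e. it rounds
 * the MVA down to a 4K page boundary before the page flush.
 */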

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
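
/* Worked example for the pre-v8 path above: with VFP but no NEON, the
 * writable mask is bits [31:30] plus [23:20], and a guest write of
 * 0x00F00000 (cp10/cp11 access fields all ones) is stored as 0xC0F00000,
 * because ASEDIS [31] and D32DIS [30] both read-as-one in that
 * configuration.
 */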

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So we use the arm_cp_write_ignore() function instead of the
     * ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

#ifndef CONFIG_USER_ONLY

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking the PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
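
/* Worked example of the c15_ccnt encoding used above: while the counter is
 * enabled, c15_ccnt holds a tick offset rather than the counter value, and
 * pmccntr_read() returns total_ticks - c15_ccnt. E.g. if the counter is
 * zeroed when total_ticks is 1000, c15_ccnt becomes 1000, and a read when
 * total_ticks is 1500 yields 500. While the counter is disabled, c15_ccnt
 * holds the counter value directly; pmccntr_sync() converts between the
 * two representations at enable/disable boundaries.
 */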
1006 
1007 #else /* CONFIG_USER_ONLY */
1008 
1009 void pmccntr_sync(CPUARMState *env)
1010 {
1011 }
1012 
1013 #endif
1014 
1015 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1016                             uint64_t value)
1017 {
1018     pmccntr_sync(env);
1019     env->cp15.pmccfiltr_el0 = value & 0x7E000000;
1020     pmccntr_sync(env);
1021 }
1022 
1023 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1024                             uint64_t value)
1025 {
1026     value &= (1 << 31);
1027     env->cp15.c9_pmcnten |= value;
1028 }
1029 
1030 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1031                              uint64_t value)
1032 {
1033     value &= (1 << 31);
1034     env->cp15.c9_pmcnten &= ~value;
1035 }
1036 
1037 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1038                          uint64_t value)
1039 {
1040     env->cp15.c9_pmovsr &= ~value;
1041 }
1042 
1043 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1044                              uint64_t value)
1045 {
1046     env->cp15.c9_pmxevtyper = value & 0xff;
1047 }
1048 
1049 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1050                             uint64_t value)
1051 {
1052     env->cp15.c9_pmuserenr = value & 1;
1053 }
1054 
1055 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1056                              uint64_t value)
1057 {
1058     /* We have no event counters so only the C bit can be changed */
1059     value &= (1 << 31);
1060     env->cp15.c9_pminten |= value;
1061 }
1062 
1063 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1064                              uint64_t value)
1065 {
1066     value &= (1 << 31);
1067     env->cp15.c9_pminten &= ~value;
1068 }
1069 
1070 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1071                        uint64_t value)
1072 {
1073     /* Note that even though the AArch64 view of this register has bits
1074      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1075      * architectural requirements for bits which are RES0 only in some
1076      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1077      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1078      */
1079     raw_write(env, ri, value & ~0x1FULL);
1080 }
1081 
1082 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1083 {
1084     /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1085      * For bits that vary between AArch32/64, code needs to check the
1086      * current execution mode before directly using the feature bit.
1087      */
1088     uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1089 
1090     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1091         valid_mask &= ~SCR_HCE;
1092 
1093         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1094          * supported if EL2 exists. The bit is UNK/SBZP when
1095          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1096          * when EL2 is unavailable.
1097          * On ARMv8, this bit is always available.
1098          */
1099         if (arm_feature(env, ARM_FEATURE_V7) &&
1100             !arm_feature(env, ARM_FEATURE_V8)) {
1101             valid_mask &= ~SCR_SMD;
1102         }
1103     }
1104 
1105     /* Clear all-context RES0 bits.  */
1106     value &= valid_mask;
1107     raw_write(env, ri, value);
1108 }
1109 
1110 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1111 {
1112     ARMCPU *cpu = arm_env_get_cpu(env);
1113 
1114     /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1115      * bank
1116      */
1117     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1118                                         ri->secure & ARM_CP_SECSTATE_S);
1119 
1120     return cpu->ccsidr[index];
1121 }
1122 
1123 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1124                          uint64_t value)
1125 {
1126     raw_write(env, ri, value & 0xf);
1127 }
1128 
1129 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1130 {
1131     CPUState *cs = ENV_GET_CPU(env);
1132     uint64_t ret = 0;
1133 
1134     if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1135         ret |= CPSR_I;
1136     }
1137     if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1138         ret |= CPSR_F;
1139     }
1140     /* External aborts are not possible in QEMU so A bit is always clear */
1141     return ret;
1142 }
1143 
1144 static const ARMCPRegInfo v7_cp_reginfo[] = {
1145     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1146     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1147       .access = PL1_W, .type = ARM_CP_NOP },
1148     /* Performance monitors are implementation defined in v7,
1149      * but with an ARM recommended set of registers, which we
1150      * follow (although we don't actually implement any counters)
1151      *
1152      * Performance registers fall into three categories:
1153      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1154      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1155      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1156      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1157      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1158      */
1159     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1160       .access = PL0_RW, .type = ARM_CP_ALIAS,
1161       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1162       .writefn = pmcntenset_write,
1163       .accessfn = pmreg_access,
1164       .raw_writefn = raw_write },
1165     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1166       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1167       .access = PL0_RW, .accessfn = pmreg_access,
1168       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1169       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1170     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1171       .access = PL0_RW,
1172       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1173       .accessfn = pmreg_access,
1174       .writefn = pmcntenclr_write,
1175       .type = ARM_CP_ALIAS },
1176     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1177       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1178       .access = PL0_RW, .accessfn = pmreg_access,
1179       .type = ARM_CP_ALIAS,
1180       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1181       .writefn = pmcntenclr_write },
1182     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1183       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1184       .accessfn = pmreg_access,
1185       .writefn = pmovsr_write,
1186       .raw_writefn = raw_write },
1187     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1188       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1189       .access = PL0_RW, .accessfn = pmreg_access,
1190       .type = ARM_CP_ALIAS,
1191       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1192       .writefn = pmovsr_write,
1193       .raw_writefn = raw_write },
1194     /* Unimplemented so WI. */
1195     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1196       .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
1197     /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
1198      * We choose to RAZ/WI.
1199      */
1200     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1201       .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1202       .accessfn = pmreg_access },
1203 #ifndef CONFIG_USER_ONLY
1204     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1205       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
1206       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1207       .accessfn = pmreg_access },
1208     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1209       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1210       .access = PL0_RW, .accessfn = pmreg_access,
1211       .type = ARM_CP_IO,
1212       .readfn = pmccntr_read, .writefn = pmccntr_write, },
1213 #endif
1214     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1215       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1216       .writefn = pmccfiltr_write,
1217       .access = PL0_RW, .accessfn = pmreg_access,
1218       .type = ARM_CP_IO,
1219       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1220       .resetvalue = 0, },
1221     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1222       .access = PL0_RW,
1223       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
1224       .accessfn = pmreg_access, .writefn = pmxevtyper_write,
1225       .raw_writefn = raw_write },
1226     /* Unimplemented, RAZ/WI. */
1227     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1228       .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1229       .accessfn = pmreg_access },
1230     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
1231       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
1232       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1233       .resetvalue = 0,
1234       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1235     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
1236       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
1237       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1238       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1239       .resetvalue = 0,
1240       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1241     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
1242       .access = PL1_RW, .accessfn = access_tpm,
1243       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1244       .resetvalue = 0,
1245       .writefn = pmintenset_write, .raw_writefn = raw_write },
1246     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
1247       .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1248       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1249       .writefn = pmintenclr_write, },
1250     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
1251       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
1252       .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1253       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1254       .writefn = pmintenclr_write },
1255     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1256       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
1257       .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
1258     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1259       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1260       .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1261       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1262                              offsetof(CPUARMState, cp15.csselr_ns) } },
1263     /* Auxiliary ID register: this actually has an IMPDEF value but for now
1264      * just RAZ for all cores:
1265      */
1266     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1267       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1268       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1269     /* Auxiliary fault status registers: these also are IMPDEF, and we
1270      * choose to RAZ/WI for all cores.
1271      */
1272     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1273       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1274       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1275     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1276       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1277       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1278     /* MAIR can just read-as-written because we don't implement caches
1279      * and so don't need to care about memory attributes.
1280      */
1281     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1282       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1283       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1284       .resetvalue = 0 },
1285     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
1286       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
1287       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
1288       .resetvalue = 0 },
1289     /* For non-long-descriptor page tables these are PRRR and NMRR;
1290      * regardless they still act as reads-as-written for QEMU.
1291      */
1292      /* MAIR0/1 are defined separately from their 64-bit counterpart which
1293       * allows them to assign the correct fieldoffset based on the endianness
1294       * handled in the field definitions.
1295       */
1296     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1297       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1298       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1299                              offsetof(CPUARMState, cp15.mair0_ns) },
1300       .resetfn = arm_cp_reset_ignore },
1301     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1302       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1303       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1304                              offsetof(CPUARMState, cp15.mair1_ns) },
1305       .resetfn = arm_cp_reset_ignore },
1306     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1307       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1308       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1309     /* 32 bit ITLB invalidates */
1310     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1311       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1312     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1313       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1314     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1315       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1316     /* 32 bit DTLB invalidates */
1317     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1318       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1319     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1320       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1321     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1322       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1323     /* 32 bit TLB invalidates */
1324     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1325       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1326     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1327       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1328     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1329       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1330     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1331       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1332     REGINFO_SENTINEL
1333 };
1334 
1335 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
1336     /* 32 bit TLB invalidates, Inner Shareable */
1337     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1338       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
1339     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1340       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
1341     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1342       .type = ARM_CP_NO_RAW, .access = PL1_W,
1343       .writefn = tlbiasid_is_write },
1344     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1345       .type = ARM_CP_NO_RAW, .access = PL1_W,
1346       .writefn = tlbimvaa_is_write },
1347     REGINFO_SENTINEL
1348 };
1349 
1350 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1351                         uint64_t value)
1352 {
1353     value &= 1;
1354     env->teecr = value;
1355 }
1356 
1357 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1358                                     bool isread)
1359 {
1360     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1361         return CP_ACCESS_TRAP;
1362     }
1363     return CP_ACCESS_OK;
1364 }
1365 
1366 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1367     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1368       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1369       .resetvalue = 0,
1370       .writefn = teecr_write },
1371     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1372       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1373       .accessfn = teehbr_access, .resetvalue = 0 },
1374     REGINFO_SENTINEL
1375 };
1376 
1377 static const ARMCPRegInfo v6k_cp_reginfo[] = {
1378     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1379       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1380       .access = PL0_RW,
1381       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1382     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1383       .access = PL0_RW,
1384       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1385                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1386       .resetfn = arm_cp_reset_ignore },
1387     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1388       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1389       .access = PL0_R|PL1_W,
1390       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1391       .resetvalue = 0},
1392     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1393       .access = PL0_R|PL1_W,
1394       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1395                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1396       .resetfn = arm_cp_reset_ignore },
1397     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1398       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1399       .access = PL1_RW,
1400       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1401     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1402       .access = PL1_RW,
1403       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1404                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1405       .resetvalue = 0 },
1406     REGINFO_SENTINEL
1407 };
1408 
1409 #ifndef CONFIG_USER_ONLY
1410 
1411 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1412                                        bool isread)
1413 {
1414     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1415      * Writable only at the highest implemented exception level.
1416      */
1417     int el = arm_current_el(env);
1418 
1419     switch (el) {
1420     case 0:
1421         if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1422             return CP_ACCESS_TRAP;
1423         }
1424         break;
1425     case 1:
1426         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1427             arm_is_secure_below_el3(env)) {
1428             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1429             return CP_ACCESS_TRAP_UNCATEGORIZED;
1430         }
1431         break;
1432     case 2:
1433     case 3:
1434         break;
1435     }
1436 
1437     if (!isread && el < arm_highest_el(env)) {
1438         return CP_ACCESS_TRAP_UNCATEGORIZED;
1439     }
1440 
1441     return CP_ACCESS_OK;
1442 }
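
/* Illustration of the rules above (example values, not from the source):
 * with CNTKCTL == 0 an EL0 read of CNTFRQ traps to EL1; setting either
 * of CNTKCTL bits [1:0] (the PL0PCTEN/PL0VCTEN enables checked by the
 * extract32() call) makes it readable. Writes are only accepted at the
 * highest implemented EL, so on a CPU with EL3 an EL1 write UNDEFs.
 */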
1443 
1444 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1445                                         bool isread)
1446 {
1447     unsigned int cur_el = arm_current_el(env);
1448     bool secure = arm_is_secure(env);
1449 
1450     /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1451     if (cur_el == 0 &&
1452         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1453         return CP_ACCESS_TRAP;
1454     }
1455 
1456     if (arm_feature(env, ARM_FEATURE_EL2) &&
1457         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1458         !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1459         return CP_ACCESS_TRAP_EL2;
1460     }
1461     return CP_ACCESS_OK;
1462 }
1463 
1464 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1465                                       bool isread)
1466 {
1467     unsigned int cur_el = arm_current_el(env);
1468     bool secure = arm_is_secure(env);
1469 
1470     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1471      * EL0[PV]TEN is zero.
1472      */
1473     if (cur_el == 0 &&
1474         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1475         return CP_ACCESS_TRAP;
1476     }
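    /* The "9 - timeridx" index relies on GTIMER_PHYS being 0 and
     * GTIMER_VIRT being 1 (their values in cpu.h): CNTKCTL.PL0PTEN is
     * bit 9 and CNTKCTL.PL0VTEN is bit 8, so each timer tests its own
     * PL0 enable bit.
     */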
1477 
1478     if (arm_feature(env, ARM_FEATURE_EL2) &&
1479         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1480         !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1481         return CP_ACCESS_TRAP_EL2;
1482     }
1483     return CP_ACCESS_OK;
1484 }
1485 
1486 static CPAccessResult gt_pct_access(CPUARMState *env,
1487                                     const ARMCPRegInfo *ri,
1488                                     bool isread)
1489 {
1490     return gt_counter_access(env, GTIMER_PHYS, isread);
1491 }
1492 
1493 static CPAccessResult gt_vct_access(CPUARMState *env,
1494                                     const ARMCPRegInfo *ri,
1495                                     bool isread)
1496 {
1497     return gt_counter_access(env, GTIMER_VIRT, isread);
1498 }
1499 
1500 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1501                                        bool isread)
1502 {
1503     return gt_timer_access(env, GTIMER_PHYS, isread);
1504 }
1505 
1506 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1507                                        bool isread)
1508 {
1509     return gt_timer_access(env, GTIMER_VIRT, isread);
1510 }
1511 
1512 static CPAccessResult gt_stimer_access(CPUARMState *env,
1513                                        const ARMCPRegInfo *ri,
1514                                        bool isread)
1515 {
1516     /* The AArch64 register view of the secure physical timer is
1517      * always accessible from EL3, and configurably accessible from
1518      * Secure EL1.
1519      */
1520     switch (arm_current_el(env)) {
1521     case 1:
1522         if (!arm_is_secure(env)) {
1523             return CP_ACCESS_TRAP;
1524         }
1525         if (!(env->cp15.scr_el3 & SCR_ST)) {
1526             return CP_ACCESS_TRAP_EL3;
1527         }
1528         return CP_ACCESS_OK;
1529     case 0:
1530     case 2:
1531         return CP_ACCESS_TRAP;
1532     case 3:
1533         return CP_ACCESS_OK;
1534     default:
1535         g_assert_not_reached();
1536     }
1537 }
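
/* For example: a CNTPS_* access from Secure EL1 with SCR_EL3.ST clear
 * traps to EL3 and the same access with SCR_EL3.ST set succeeds, while
 * any access from Non-secure EL1, EL0 or EL2 fails with CP_ACCESS_TRAP
 * and EL3 always succeeds.
 */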
1538 
1539 static uint64_t gt_get_countervalue(CPUARMState *env)
1540 {
1541     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1542 }
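
/* Worked example (assuming GTIMER_SCALE == 16, its value elsewhere in
 * the tree at this revision): after 1ms of virtual time
 * qemu_clock_get_ns() returns 1000000, so the guest counter reads
 * 1000000 / 16 == 62500 ticks, i.e. a 62.5MHz counter. This matches the
 * CNTFRQ reset value of (1000 * 1000 * 1000) / GTIMER_SCALE below.
 */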
1543 
1544 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1545 {
1546     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1547 
1548     if (gt->ctl & 1) {
1549         /* Timer enabled: calculate and set the current ISTATUS and IRQ
1550          * line, and reprogram the timer for when ISTATUS next changes.
1551          */
1552         uint64_t offset = timeridx == GTIMER_VIRT ?
1553                                       cpu->env.cp15.cntvoff_el2 : 0;
1554         uint64_t count = gt_get_countervalue(&cpu->env);
1555         /* Note that this must be unsigned 64-bit arithmetic: */
1556         int istatus = count - offset >= gt->cval;
1557         uint64_t nexttick;
1558         int irqstate;
1559 
1560         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1561 
1562         irqstate = (istatus && !(gt->ctl & 2));
1563         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1564 
1565         if (istatus) {
1566             /* Next transition is when count rolls back over to zero */
1567             nexttick = UINT64_MAX;
1568         } else {
1569             /* Next transition is when we hit cval */
1570             nexttick = gt->cval + offset;
1571         }
1572         /* Note that the desired next expiry time might be beyond the
1573          * signed-64-bit range of a QEMUTimer -- in this case we just
1574          * set the timer for as far in the future as possible. When the
1575          * timer expires we will reset the timer for any remaining period.
1576          */
1577         if (nexttick > INT64_MAX / GTIMER_SCALE) {
1578             nexttick = INT64_MAX / GTIMER_SCALE;
1579         }
1580         timer_mod(cpu->gt_timer[timeridx], nexttick);
1581         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1582     } else {
1583         /* Timer disabled: ISTATUS and timer output always clear */
1584         gt->ctl &= ~4;
1585         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1586         timer_del(cpu->gt_timer[timeridx]);
1587         trace_arm_gt_recalc_disabled(timeridx);
1588     }
1589 }
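
/* Worked example for the recalculation above (illustrative values):
 * with ctl == 1 (ENABLE set, IMASK clear), cval == 1000, offset == 0
 * and a current count of 800, istatus is (800 >= 1000) == 0, so the IRQ
 * line is lowered and the QEMU timer is programmed for nexttick ==
 * cval + offset == 1000, the point where ISTATUS flips. Once the count
 * passes 1000, istatus becomes 1, the IRQ line is raised, and the next
 * change would be the 64-bit counter wrap, so the timer is parked at
 * the clamped maximum.
 */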
1590 
1591 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1592                            int timeridx)
1593 {
1594     ARMCPU *cpu = arm_env_get_cpu(env);
1595 
1596     timer_del(cpu->gt_timer[timeridx]);
1597 }
1598 
1599 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1600 {
1601     return gt_get_countervalue(env);
1602 }
1603 
1604 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1605 {
1606     return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1607 }
1608 
1609 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1610                           int timeridx,
1611                           uint64_t value)
1612 {
1613     trace_arm_gt_cval_write(timeridx, value);
1614     env->cp15.c14_timer[timeridx].cval = value;
1615     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1616 }
1617 
1618 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1619                              int timeridx)
1620 {
1621     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1622 
1623     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1624                       (gt_get_countervalue(env) - offset));
1625 }
1626 
1627 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1628                           int timeridx,
1629                           uint64_t value)
1630 {
1631     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1632 
1633     trace_arm_gt_tval_write(timeridx, value);
1634     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1635                                          sextract64(value, 0, 32);
1636     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1637 }
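
/* Worked example for the TVAL view (illustrative values): with a
 * current count of 5000 and zero offset, writing TVAL == 100 sets
 * cval = 5000 + 100 = 5100. Reading TVAL back just before tick 5100
 * returns a small positive value; after the timer fires it goes
 * negative, since TVAL is architecturally a signed 32-bit downcounter.
 * The sextract64() above makes a write of 0xffffffff mean -1 ("already
 * expired") rather than a huge timeout.
 */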
1638 
1639 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1640                          int timeridx,
1641                          uint64_t value)
1642 {
1643     ARMCPU *cpu = arm_env_get_cpu(env);
1644     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1645 
1646     trace_arm_gt_ctl_write(timeridx, value);
1647     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1648     if ((oldval ^ value) & 1) {
1649         /* Enable toggled */
1650         gt_recalc_timer(cpu, timeridx);
1651     } else if ((oldval ^ value) & 2) {
1652         /* IMASK toggled: don't need to recalculate,
1653          * just set the interrupt line based on ISTATUS
1654          */
1655         int irqstate = (oldval & 4) && !(value & 2);
1656 
1657         trace_arm_gt_imask_toggle(timeridx, irqstate);
1658         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1659     }
1660 }
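
/* CTL bit layout assumed above: bit 0 is ENABLE, bit 1 is IMASK and
 * bit 2 is the read-only ISTATUS. E.g. writing 1 to a disabled timer
 * toggles ENABLE and forces a full recalc, while writing 3 to an
 * enabled, pending timer (ctl == 5) only toggles IMASK: irqstate ==
 * ((5 & 4) && !(3 & 2)) == 0, so the IRQ line is simply dropped
 * without touching the QEMU timer.
 */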
1661 
1662 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1663 {
1664     gt_timer_reset(env, ri, GTIMER_PHYS);
1665 }
1666 
1667 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1668                                uint64_t value)
1669 {
1670     gt_cval_write(env, ri, GTIMER_PHYS, value);
1671 }
1672 
1673 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1674 {
1675     return gt_tval_read(env, ri, GTIMER_PHYS);
1676 }
1677 
1678 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1679                                uint64_t value)
1680 {
1681     gt_tval_write(env, ri, GTIMER_PHYS, value);
1682 }
1683 
1684 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1685                               uint64_t value)
1686 {
1687     gt_ctl_write(env, ri, GTIMER_PHYS, value);
1688 }
1689 
1690 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1691 {
1692     gt_timer_reset(env, ri, GTIMER_VIRT);
1693 }
1694 
1695 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1696                                uint64_t value)
1697 {
1698     gt_cval_write(env, ri, GTIMER_VIRT, value);
1699 }
1700 
1701 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1702 {
1703     return gt_tval_read(env, ri, GTIMER_VIRT);
1704 }
1705 
1706 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1707                                uint64_t value)
1708 {
1709     gt_tval_write(env, ri, GTIMER_VIRT, value);
1710 }
1711 
1712 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1713                               uint64_t value)
1714 {
1715     gt_ctl_write(env, ri, GTIMER_VIRT, value);
1716 }
1717 
1718 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1719                               uint64_t value)
1720 {
1721     ARMCPU *cpu = arm_env_get_cpu(env);
1722 
1723     trace_arm_gt_cntvoff_write(value);
1724     raw_write(env, ri, value);
1725     gt_recalc_timer(cpu, GTIMER_VIRT);
1726 }
1727 
1728 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1729 {
1730     gt_timer_reset(env, ri, GTIMER_HYP);
1731 }
1732 
1733 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734                               uint64_t value)
1735 {
1736     gt_cval_write(env, ri, GTIMER_HYP, value);
1737 }
1738 
1739 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1740 {
1741     return gt_tval_read(env, ri, GTIMER_HYP);
1742 }
1743 
1744 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1745                               uint64_t value)
1746 {
1747     gt_tval_write(env, ri, GTIMER_HYP, value);
1748 }
1749 
1750 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751                               uint64_t value)
1752 {
1753     gt_ctl_write(env, ri, GTIMER_HYP, value);
1754 }
1755 
1756 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1757 {
1758     gt_timer_reset(env, ri, GTIMER_SEC);
1759 }
1760 
1761 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1762                               uint64_t value)
1763 {
1764     gt_cval_write(env, ri, GTIMER_SEC, value);
1765 }
1766 
1767 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1768 {
1769     return gt_tval_read(env, ri, GTIMER_SEC);
1770 }
1771 
1772 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1773                               uint64_t value)
1774 {
1775     gt_tval_write(env, ri, GTIMER_SEC, value);
1776 }
1777 
1778 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1779                               uint64_t value)
1780 {
1781     gt_ctl_write(env, ri, GTIMER_SEC, value);
1782 }
1783 
1784 void arm_gt_ptimer_cb(void *opaque)
1785 {
1786     ARMCPU *cpu = opaque;
1787 
1788     gt_recalc_timer(cpu, GTIMER_PHYS);
1789 }
1790 
1791 void arm_gt_vtimer_cb(void *opaque)
1792 {
1793     ARMCPU *cpu = opaque;
1794 
1795     gt_recalc_timer(cpu, GTIMER_VIRT);
1796 }
1797 
1798 void arm_gt_htimer_cb(void *opaque)
1799 {
1800     ARMCPU *cpu = opaque;
1801 
1802     gt_recalc_timer(cpu, GTIMER_HYP);
1803 }
1804 
1805 void arm_gt_stimer_cb(void *opaque)
1806 {
1807     ARMCPU *cpu = opaque;
1808 
1809     gt_recalc_timer(cpu, GTIMER_SEC);
1810 }
1811 
1812 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1813     /* Note that CNTFRQ is purely reads-as-written for the benefit
1814      * of software; writing it doesn't actually change the timer frequency.
1815      * Our reset value matches the fixed frequency we implement the timer at.
1816      */
1817     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1818       .type = ARM_CP_ALIAS,
1819       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1820       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1821     },
1822     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1823       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1824       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1825       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1826       .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1827     },
1828     /* overall control: mostly access permissions */
1829     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1830       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1831       .access = PL1_RW,
1832       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1833       .resetvalue = 0,
1834     },
1835     /* per-timer control */
1836     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1837       .secure = ARM_CP_SECSTATE_NS,
1838       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1839       .accessfn = gt_ptimer_access,
1840       .fieldoffset = offsetoflow32(CPUARMState,
1841                                    cp15.c14_timer[GTIMER_PHYS].ctl),
1842       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1843     },
1844     { .name = "CNTP_CTL(S)",
1845       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1846       .secure = ARM_CP_SECSTATE_S,
1847       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1848       .accessfn = gt_ptimer_access,
1849       .fieldoffset = offsetoflow32(CPUARMState,
1850                                    cp15.c14_timer[GTIMER_SEC].ctl),
1851       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1852     },
1853     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1854       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1855       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1856       .accessfn = gt_ptimer_access,
1857       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1858       .resetvalue = 0,
1859       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1860     },
1861     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1862       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1863       .accessfn = gt_vtimer_access,
1864       .fieldoffset = offsetoflow32(CPUARMState,
1865                                    cp15.c14_timer[GTIMER_VIRT].ctl),
1866       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1867     },
1868     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1869       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1870       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1871       .accessfn = gt_vtimer_access,
1872       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1873       .resetvalue = 0,
1874       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1875     },
1876     /* TimerValue views: a 32 bit downcounting view of the underlying state */
1877     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1878       .secure = ARM_CP_SECSTATE_NS,
1879       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1880       .accessfn = gt_ptimer_access,
1881       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1882     },
1883     { .name = "CNTP_TVAL(S)",
1884       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1885       .secure = ARM_CP_SECSTATE_S,
1886       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1887       .accessfn = gt_ptimer_access,
1888       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1889     },
1890     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1891       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1892       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1893       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1894       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1895     },
1896     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1897       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1898       .accessfn = gt_vtimer_access,
1899       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1900     },
1901     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1902       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1903       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1904       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1905       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1906     },
1907     /* The counter itself */
1908     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1909       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1910       .accessfn = gt_pct_access,
1911       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1912     },
1913     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1914       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1915       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1916       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1917     },
1918     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1919       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1920       .accessfn = gt_vct_access,
1921       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1922     },
1923     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1924       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1925       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1926       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1927     },
1928     /* Comparison value, indicating when the timer goes off */
1929     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1930       .secure = ARM_CP_SECSTATE_NS,
1931       .access = PL1_RW | PL0_R,
1932       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1933       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1934       .accessfn = gt_ptimer_access,
1935       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1936     },
1937     { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1938       .secure = ARM_CP_SECSTATE_S,
1939       .access = PL1_RW | PL0_R,
1940       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1941       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1942       .accessfn = gt_ptimer_access,
1943       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1944     },
1945     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1946       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1947       .access = PL1_RW | PL0_R,
1948       .type = ARM_CP_IO,
1949       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1950       .resetvalue = 0, .accessfn = gt_ptimer_access,
1951       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1952     },
1953     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1954       .access = PL1_RW | PL0_R,
1955       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1956       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1957       .accessfn = gt_vtimer_access,
1958       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1959     },
1960     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1961       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1962       .access = PL1_RW | PL0_R,
1963       .type = ARM_CP_IO,
1964       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1965       .resetvalue = 0, .accessfn = gt_vtimer_access,
1966       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1967     },
1968     /* Secure timer -- this is actually restricted to only EL3
1969      * and configurably Secure-EL1 via the accessfn.
1970      */
1971     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
1972       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
1973       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
1974       .accessfn = gt_stimer_access,
1975       .readfn = gt_sec_tval_read,
1976       .writefn = gt_sec_tval_write,
1977       .resetfn = gt_sec_timer_reset,
1978     },
1979     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
1980       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
1981       .type = ARM_CP_IO, .access = PL1_RW,
1982       .accessfn = gt_stimer_access,
1983       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
1984       .resetvalue = 0,
1985       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1986     },
1987     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
1988       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
1989       .type = ARM_CP_IO, .access = PL1_RW,
1990       .accessfn = gt_stimer_access,
1991       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1992       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1993     },
1994     REGINFO_SENTINEL
1995 };
1996 
1997 #else
1998 /* In user mode none of the generic timer registers are accessible,
1999  * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2000  * so instead just don't register any of them.
2001  */
2002 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2003     REGINFO_SENTINEL
2004 };
2005 
2006 #endif
2007 
2008 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2009 {
2010     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2011         raw_write(env, ri, value);
2012     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2013         raw_write(env, ri, value & 0xfffff6ff);
2014     } else {
2015         raw_write(env, ri, value & 0xfffff1ff);
2016     }
2017 }
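
/* Example of the masking above: on a v7 (non-LPAE) core a write of
 * 0xffffffff to PAR is stored as 0xfffff6ff, on earlier cores as
 * 0xfffff1ff, and with LPAE the full 64-bit value is stored unmodified.
 */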
2018 
2019 #ifndef CONFIG_USER_ONLY
2020 /* get_phys_addr() isn't present for user-mode-only targets */
2021 
2022 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2023                                  bool isread)
2024 {
2025     if (ri->opc2 & 4) {
2026         /* The ATS12NSO* operations must trap to EL3 if executed in
2027          * Secure EL1 (which can only happen if EL3 is AArch64).
2028          * They are simply UNDEF if executed from NS EL1.
2029          * They function normally from EL2 or EL3.
2030          */
2031         if (arm_current_el(env) == 1) {
2032             if (arm_is_secure_below_el3(env)) {
2033                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2034             }
2035             return CP_ACCESS_TRAP_UNCATEGORIZED;
2036         }
2037     }
2038     return CP_ACCESS_OK;
2039 }
2040 
2041 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2042                              int access_type, ARMMMUIdx mmu_idx)
2043 {
2044     hwaddr phys_addr;
2045     target_ulong page_size;
2046     int prot;
2047     uint32_t fsr;
2048     bool ret;
2049     uint64_t par64;
2050     MemTxAttrs attrs = {};
2051     ARMMMUFaultInfo fi = {};
2052 
2053     ret = get_phys_addr(env, value, access_type, mmu_idx,
2054                         &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
2055     if (extended_addresses_enabled(env)) {
2056         /* fsr is a DFSR/IFSR value for the long descriptor
2057          * translation table format, but with WnR always clear.
2058          * Convert it to a 64-bit PAR.
2059          */
2060         par64 = (1 << 11); /* LPAE bit always set */
2061         if (!ret) {
2062             par64 |= phys_addr & ~0xfffULL;
2063             if (!attrs.secure) {
2064                 par64 |= (1 << 9); /* NS */
2065             }
2066             /* We don't set the ATTR or SH fields in the PAR. */
2067         } else {
2068             par64 |= 1; /* F */
2069             par64 |= (fsr & 0x3f) << 1; /* FS */
2070             /* Note that S2WLK and FSTAGE are always zero, because we don't
2071              * implement virtualization and therefore there can't be a stage 2
2072              * fault.
2073              */
2074         }
2075     } else {
2076         /* fsr is a DFSR/IFSR value for the short descriptor
2077          * translation table format (with WnR always clear).
2078          * Convert it to a 32-bit PAR.
2079          */
2080         if (!ret) {
2081             /* We do not set any attribute bits in the PAR */
2082             if (page_size == (1 << 24)
2083                 && arm_feature(env, ARM_FEATURE_V7)) {
2084                 par64 = (phys_addr & 0xff000000) | (1 << 1);
2085             } else {
2086                 par64 = phys_addr & 0xfffff000;
2087             }
2088             if (!attrs.secure) {
2089                 par64 |= (1 << 9); /* NS */
2090             }
2091         } else {
2092             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2093                     ((fsr & 0xf) << 1) | 1;
2094         }
2095     }
2096     return par64;
2097 }
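
/* Worked example of the 64-bit PAR encoding above (illustrative
 * values): a successful Non-secure translation to physical address
 * 0x80004000 produces par64 == 0x80004000 | (1 << 11) | (1 << 9) ==
 * 0x80004a00 (LPAE and NS bits set). A faulting translation with
 * fsr == 0x05 instead produces (1 << 11) | (0x05 << 1) | 1 == 0x80b,
 * i.e. F set and the fault status in the FS field.
 */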
2098 
2099 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2100 {
2101     int access_type = ri->opc2 & 1;
2102     uint64_t par64;
2103     ARMMMUIdx mmu_idx;
2104     int el = arm_current_el(env);
2105     bool secure = arm_is_secure_below_el3(env);
2106 
2107     switch (ri->opc2 & 6) {
2108     case 0:
2109         /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2110         switch (el) {
2111         case 3:
2112             mmu_idx = ARMMMUIdx_S1E3;
2113             break;
2114         case 2:
2115             mmu_idx = ARMMMUIdx_S1NSE1;
2116             break;
2117         case 1:
2118             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2119             break;
2120         default:
2121             g_assert_not_reached();
2122         }
2123         break;
2124     case 2:
2125         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2126         switch (el) {
2127         case 3:
2128             mmu_idx = ARMMMUIdx_S1SE0;
2129             break;
2130         case 2:
2131             mmu_idx = ARMMMUIdx_S1NSE0;
2132             break;
2133         case 1:
2134             mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2135             break;
2136         default:
2137             g_assert_not_reached();
2138         }
2139         break;
2140     case 4:
2141         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2142         mmu_idx = ARMMMUIdx_S12NSE1;
2143         break;
2144     case 6:
2145         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2146         mmu_idx = ARMMMUIdx_S12NSE0;
2147         break;
2148     default:
2149         g_assert_not_reached();
2150     }
2151 
2152     par64 = do_ats_write(env, value, access_type, mmu_idx);
2153 
2154     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2155 }
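
/* For example, ATS1CPR (opc2 == 0, a read) issued from Secure EL1
 * translates via ARMMMUIdx_S1SE1 and the result lands in the Secure
 * bank of PAR via A32_BANKED_CURRENT_REG_SET(); the same operation
 * from EL3 uses ARMMMUIdx_S1E3.
 */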
2156 
2157 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2158                         uint64_t value)
2159 {
2160     int access_type = ri->opc2 & 1;
2161     uint64_t par64;
2162 
2163     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2164 
2165     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2166 }
2167 
2168 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2169                                      bool isread)
2170 {
2171     if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2172         return CP_ACCESS_TRAP;
2173     }
2174     return CP_ACCESS_OK;
2175 }
2176 
2177 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2178                         uint64_t value)
2179 {
2180     int access_type = ri->opc2 & 1;
2181     ARMMMUIdx mmu_idx;
2182     int secure = arm_is_secure_below_el3(env);
2183 
2184     switch (ri->opc2 & 6) {
2185     case 0:
2186         switch (ri->opc1) {
2187         case 0: /* AT S1E1R, AT S1E1W */
2188             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2189             break;
2190         case 4: /* AT S1E2R, AT S1E2W */
2191             mmu_idx = ARMMMUIdx_S1E2;
2192             break;
2193         case 6: /* AT S1E3R, AT S1E3W */
2194             mmu_idx = ARMMMUIdx_S1E3;
2195             break;
2196         default:
2197             g_assert_not_reached();
2198         }
2199         break;
2200     case 2: /* AT S1E0R, AT S1E0W */
2201         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2202         break;
2203     case 4: /* AT S12E1R, AT S12E1W */
2204         mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2205         break;
2206     case 6: /* AT S12E0R, AT S12E0W */
2207         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2208         break;
2209     default:
2210         g_assert_not_reached();
2211     }
2212 
2213     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2214 }
2215 #endif
2216 
2217 static const ARMCPRegInfo vapa_cp_reginfo[] = {
2218     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2219       .access = PL1_RW, .resetvalue = 0,
2220       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2221                              offsetoflow32(CPUARMState, cp15.par_ns) },
2222       .writefn = par_write },
2223 #ifndef CONFIG_USER_ONLY
2224     /* This underdecoding is safe because the reginfo is NO_RAW. */
2225     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2226       .access = PL1_W, .accessfn = ats_access,
2227       .writefn = ats_write, .type = ARM_CP_NO_RAW },
2228 #endif
2229     REGINFO_SENTINEL
2230 };
2231 
2232 /* Return basic MPU access permission bits.  */
2233 static uint32_t simple_mpu_ap_bits(uint32_t val)
2234 {
2235     uint32_t ret;
2236     uint32_t mask;
2237     int i;
2238     ret = 0;
2239     mask = 3;
2240     for (i = 0; i < 16; i += 2) {
2241         ret |= (val >> i) & mask;
2242         mask <<= 2;
2243     }
2244     return ret;
2245 }
2246 
2247 /* Pad basic MPU access permission bits to extended format.  */
2248 static uint32_t extended_mpu_ap_bits(uint32_t val)
2249 {
2250     uint32_t ret;
2251     uint32_t mask;
2252     int i;
2253     ret = 0;
2254     mask = 3;
2255     for (i = 0; i < 16; i += 2) {
2256         ret |= (val & mask) << i;
2257         mask <<= 2;
2258     }
2259     return ret;
2260 }
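
/* Worked example: the "extended" format keeps one 2-bit AP field in
 * the low bits of each nibble, while the "simple" format packs the
 * fields back to back. With AP0 == 2 and AP1 == 1:
 *   extended_mpu_ap_bits(0x06) == 0x12
 *   simple_mpu_ap_bits(0x12) == 0x06
 */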
2261 
2262 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2263                                  uint64_t value)
2264 {
2265     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2266 }
2267 
2268 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2269 {
2270     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2271 }
2272 
2273 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2274                                  uint64_t value)
2275 {
2276     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2277 }
2278 
2279 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2280 {
2281     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2282 }
2283 
2284 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2285 {
2286     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2287 
2288     if (!u32p) {
2289         return 0;
2290     }
2291 
2292     u32p += env->cp15.c6_rgnr;
2293     return *u32p;
2294 }
2295 
2296 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2297                          uint64_t value)
2298 {
2299     ARMCPU *cpu = arm_env_get_cpu(env);
2300     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2301 
2302     if (!u32p) {
2303         return;
2304     }
2305 
2306     u32p += env->cp15.c6_rgnr;
2307     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2308     *u32p = value;
2309 }
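
/* DRBAR/DRSR/DRACR are banked per MPU region: raw_ptr() here points at
 * a field which itself holds a dynamically allocated array with one
 * uint32_t per region, and RGNR (cp15.c6_rgnr) indexes into it. E.g.
 * with RGNR == 3 a DRBAR write lands in entry 3; the TLB is flushed
 * because changing a region can invalidate existing mappings.
 */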
2310 
2311 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2312 {
2313     ARMCPU *cpu = arm_env_get_cpu(env);
2314     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2315 
2316     if (!u32p) {
2317         return;
2318     }
2319 
2320     memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2321 }
2322 
2323 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2324                               uint64_t value)
2325 {
2326     ARMCPU *cpu = arm_env_get_cpu(env);
2327     uint32_t nrgs = cpu->pmsav7_dregion;
2328 
2329     if (value >= nrgs) {
2330         qemu_log_mask(LOG_GUEST_ERROR,
2331                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2332                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
2333         return;
2334     }
2335 
2336     raw_write(env, ri, value);
2337 }
2338 
2339 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2340     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2341       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2342       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2343       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2344     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2345       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2346       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2347       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2348     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2349       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2350       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2351       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2352     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2353       .access = PL1_RW,
2354       .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
2355       .writefn = pmsav7_rgnr_write },
2356     REGINFO_SENTINEL
2357 };
2358 
2359 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2360     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2361       .access = PL1_RW, .type = ARM_CP_ALIAS,
2362       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2363       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2364     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2365       .access = PL1_RW, .type = ARM_CP_ALIAS,
2366       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2367       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2368     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2369       .access = PL1_RW,
2370       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2371       .resetvalue = 0, },
2372     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2373       .access = PL1_RW,
2374       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2375       .resetvalue = 0, },
2376     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2377       .access = PL1_RW,
2378       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2379     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2380       .access = PL1_RW,
2381       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2382     /* Protection region base and size registers */
2383     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2384       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2385       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2386     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2387       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2388       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2389     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2390       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2391       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2392     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2393       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2394       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2395     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2396       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2397       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2398     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2399       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2400       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2401     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2402       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2403       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2404     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2405       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2406       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2407     REGINFO_SENTINEL
2408 };
2409 
2410 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2411                                  uint64_t value)
2412 {
2413     TCR *tcr = raw_ptr(env, ri);
2414     int maskshift = extract32(value, 0, 3);
2415 
2416     if (!arm_feature(env, ARM_FEATURE_V8)) {
2417         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2418             /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2419              * using Long-descriptor translation table format */
2420             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2421         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2422             /* In an implementation that includes the Security Extensions
2423              * TTBCR has additional fields PD0 [4] and PD1 [5] for
2424              * Short-descriptor translation table format.
2425              */
2426             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2427         } else {
2428             value &= TTBCR_N;
2429         }
2430     }
2431 
2432     /* Update the masks corresponding to the TCR bank being written.
2433      * Note that we always calculate mask and base_mask, but
2434      * they are only used for short-descriptor tables (i.e. if EAE is 0);
2435      * for long-descriptor tables the TCR fields are used differently
2436      * and the mask and base_mask values are meaningless.
2437      */
2438     tcr->raw_tcr = value;
2439     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2440     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2441 }
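
/* Worked example for the short-descriptor masks (EAE == 0): writing
 * TTBCR.N == 2 gives maskshift == 2, so mask == ~(0xffffffffu >> 2) ==
 * 0xc0000000 and base_mask == ~(0x3fffu >> 2) == 0xfffff000. Virtual
 * addresses with either of the top two bits set are then sent to TTBR1
 * by the table walk, and the TTBR0 table base is aligned to
 * 16KB >> 2 == 4KB.
 */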
2442 
2443 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2444                              uint64_t value)
2445 {
2446     ARMCPU *cpu = arm_env_get_cpu(env);
2447 
2448     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2449         /* With LPAE the TTBCR could result in a change of ASID
2450          * via the TTBCR.A1 bit, so do a TLB flush.
2451          */
2452         tlb_flush(CPU(cpu));
2453     }
2454     vmsa_ttbcr_raw_write(env, ri, value);
2455 }
2456 
2457 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2458 {
2459     TCR *tcr = raw_ptr(env, ri);
2460 
2461     /* Reset both the TCR as well as the masks corresponding to the bank of
2462      * the TCR being reset.
2463      */
2464     tcr->raw_tcr = 0;
2465     tcr->mask = 0;
2466     tcr->base_mask = 0xffffc000u;
2467 }
2468 
2469 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2470                                uint64_t value)
2471 {
2472     ARMCPU *cpu = arm_env_get_cpu(env);
2473     TCR *tcr = raw_ptr(env, ri);
2474 
2475     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2476     tlb_flush(CPU(cpu));
2477     tcr->raw_tcr = value;
2478 }
2479 
2480 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2481                             uint64_t value)
2482 {
2483     /* 64-bit accesses to the TTBRs can change the ASID and so we
2484      * must flush the TLB.
2485      */
2486     if (cpreg_field_is_64bit(ri)) {
2487         ARMCPU *cpu = arm_env_get_cpu(env);
2488 
2489         tlb_flush(CPU(cpu));
2490     }
2491     raw_write(env, ri, value);
2492 }
2493 
2494 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2495                         uint64_t value)
2496 {
2497     ARMCPU *cpu = arm_env_get_cpu(env);
2498     CPUState *cs = CPU(cpu);
2499 
2500     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2501     if (raw_read(env, ri) != value) {
2502         tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2503                             ARMMMUIdx_S2NS, -1);
2504         raw_write(env, ri, value);
2505     }
2506 }
2507 
2508 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2509     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2510       .access = PL1_RW, .type = ARM_CP_ALIAS,
2511       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2512                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2513     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2514       .access = PL1_RW, .resetvalue = 0,
2515       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2516                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2517     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2518       .access = PL1_RW, .resetvalue = 0,
2519       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2520                              offsetof(CPUARMState, cp15.dfar_ns) } },
2521     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2522       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2523       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2524       .resetvalue = 0, },
2525     REGINFO_SENTINEL
2526 };
2527 
2528 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2529     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2530       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2531       .access = PL1_RW,
2532       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2533     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2534       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2535       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2536       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2537                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
2538     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2539       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2540       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2541       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2542                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
2543     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2544       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2545       .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2546       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2547       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2548     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2549       .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2550       .raw_writefn = vmsa_ttbcr_raw_write,
2551       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2552                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2553     REGINFO_SENTINEL
2554 };
2555 
2556 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2557                                 uint64_t value)
2558 {
2559     env->cp15.c15_ticonfig = value & 0xe7;
2560     /* The OS_TYPE bit in this register changes the reported CPUID! */
2561     env->cp15.c0_cpuid = (value & (1 << 5)) ?
2562         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2563 }
2564 
2565 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2566                                 uint64_t value)
2567 {
2568     env->cp15.c15_threadid = value & 0xffff;
2569 }
2570 
2571 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2572                            uint64_t value)
2573 {
2574     /* Wait-for-interrupt (deprecated) */
2575     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2576 }
2577 
2578 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2579                                   uint64_t value)
2580 {
2581     /* On OMAP there are registers indicating the max/min index of dcache lines
2582      * containing a dirty line; cache flush operations have to reset these.
2583      */
2584     env->cp15.c15_i_max = 0x000;
2585     env->cp15.c15_i_min = 0xff0;
2586 }
2587 
2588 static const ARMCPRegInfo omap_cp_reginfo[] = {
2589     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2590       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2591       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2592       .resetvalue = 0, },
2593     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2594       .access = PL1_RW, .type = ARM_CP_NOP },
2595     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2596       .access = PL1_RW,
2597       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2598       .writefn = omap_ticonfig_write },
2599     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2600       .access = PL1_RW,
2601       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2602     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2603       .access = PL1_RW, .resetvalue = 0xff0,
2604       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2605     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2606       .access = PL1_RW,
2607       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2608       .writefn = omap_threadid_write },
2609     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2610       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2611       .type = ARM_CP_NO_RAW,
2612       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2613     /* TODO: Peripheral port remap register:
2614      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2615      * base address at $rn & ~0xfff and a map size of 0x200 << ($rn & 0xfff),
2616      * when the MMU is off.
2617      */
2618     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2619       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2620       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2621       .writefn = omap_cachemaint_write },
2622     { .name = "C9", .cp = 15, .crn = 9,
2623       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2624       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2625     REGINFO_SENTINEL
2626 };
2627 
2628 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2629                               uint64_t value)
2630 {
2631     env->cp15.c15_cpar = value & 0x3fff;
2632 }
2633 
2634 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2635     { .name = "XSCALE_CPAR",
2636       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2637       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2638       .writefn = xscale_cpar_write, },
2639     { .name = "XSCALE_AUXCR",
2640       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2641       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2642       .resetvalue = 0, },
2643     /* XScale specific cache-lockdown: since we have no cache we NOP these
2644      * and hope the guest does not really rely on cache behaviour.
2645      */
2646     { .name = "XSCALE_LOCK_ICACHE_LINE",
2647       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2648       .access = PL1_W, .type = ARM_CP_NOP },
2649     { .name = "XSCALE_UNLOCK_ICACHE",
2650       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2651       .access = PL1_W, .type = ARM_CP_NOP },
2652     { .name = "XSCALE_DCACHE_LOCK",
2653       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2654       .access = PL1_RW, .type = ARM_CP_NOP },
2655     { .name = "XSCALE_UNLOCK_DCACHE",
2656       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2657       .access = PL1_W, .type = ARM_CP_NOP },
2658     REGINFO_SENTINEL
2659 };
2660 
2661 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2662     /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2663      * implementation of this implementation-defined space.
2664      * Ideally this should eventually disappear in favour of actually
2665      * implementing the correct behaviour for all cores.
2666      */
2667     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2668       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2669       .access = PL1_RW,
2670       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2671       .resetvalue = 0 },
2672     REGINFO_SENTINEL
2673 };
2674 
2675 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2676     /* Cache status: RAZ because we have no cache so it's always clean */
2677     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2678       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2679       .resetvalue = 0 },
2680     REGINFO_SENTINEL
2681 };
2682 
2683 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2684     /* We never have a block transfer operation in progress */
2685     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2686       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2687       .resetvalue = 0 },
2688     /* The cache ops themselves: these all NOP for QEMU */
2689     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2690       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2691     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2692       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2693     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2694       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2695     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2696       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2697     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2698       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2699     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2700       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2701     REGINFO_SENTINEL
2702 };
2703 
2704 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2705     /* The cache test-and-clean instructions always return (1 << 30)
2706      * to indicate that there are no dirty cache lines.
2707      */
2708     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2709       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2710       .resetvalue = (1 << 30) },
2711     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2712       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2713       .resetvalue = (1 << 30) },
2714     REGINFO_SENTINEL
2715 };
2716 
2717 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2718     /* Ignore ReadBuffer accesses */
2719     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2720       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2721       .access = PL1_RW, .resetvalue = 0,
2722       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2723     REGINFO_SENTINEL
2724 };
2725 
2726 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2727 {
2728     ARMCPU *cpu = arm_env_get_cpu(env);
2729     unsigned int cur_el = arm_current_el(env);
2730     bool secure = arm_is_secure(env);
2731 
2732     if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2733         return env->cp15.vpidr_el2;
2734     }
2735     return raw_read(env, ri);
2736 }
2737 
2738 static uint64_t mpidr_read_val(CPUARMState *env)
2739 {
2740     ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2741     uint64_t mpidr = cpu->mp_affinity;
2742 
2743     if (arm_feature(env, ARM_FEATURE_V7MP)) {
2744         mpidr |= (1U << 31);
2745         /* Cores which are uniprocessor (non-coherent)
2746          * but still implement the MP extensions set
2747          * bit 30. (For instance, Cortex-R5).
2748          */
2749         if (cpu->mp_is_up) {
2750             mpidr |= (1u << 30);
2751         }
2752     }
2753     return mpidr;
2754 }
2755 
2756 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2757 {
2758     unsigned int cur_el = arm_current_el(env);
2759     bool secure = arm_is_secure(env);
2760 
2761     if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2762         return env->cp15.vmpidr_el2;
2763     }
2764     return mpidr_read_val(env);
2765 }
2766 
2767 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2768     { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2769       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2770       .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2771     REGINFO_SENTINEL
2772 };
2773 
2774 static const ARMCPRegInfo lpae_cp_reginfo[] = {
2775     /* NOP AMAIR0/1 */
2776     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2777       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2778       .access = PL1_RW, .type = ARM_CP_CONST,
2779       .resetvalue = 0 },
2780     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2781     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2782       .access = PL1_RW, .type = ARM_CP_CONST,
2783       .resetvalue = 0 },
2784     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2785       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2786       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2787                              offsetof(CPUARMState, cp15.par_ns)} },
2788     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2789       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2790       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2791                              offsetof(CPUARMState, cp15.ttbr0_ns) },
2792       .writefn = vmsa_ttbr_write, },
2793     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2794       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2795       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2796                              offsetof(CPUARMState, cp15.ttbr1_ns) },
2797       .writefn = vmsa_ttbr_write, },
2798     REGINFO_SENTINEL
2799 };
2800 
2801 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2802 {
2803     return vfp_get_fpcr(env);
2804 }
2805 
2806 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2807                             uint64_t value)
2808 {
2809     vfp_set_fpcr(env, value);
2810 }
2811 
2812 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2813 {
2814     return vfp_get_fpsr(env);
2815 }
2816 
2817 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2818                             uint64_t value)
2819 {
2820     vfp_set_fpsr(env, value);
2821 }
2822 
2823 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2824                                        bool isread)
2825 {
2826     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2827         return CP_ACCESS_TRAP;
2828     }
2829     return CP_ACCESS_OK;
2830 }
2831 
2832 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2833                             uint64_t value)
2834 {
2835     env->daif = value & PSTATE_DAIF;
2836 }
2837 
2838 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2839                                           const ARMCPRegInfo *ri,
2840                                           bool isread)
2841 {
2842     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2843      * SCTLR_EL1.UCI is set.
2844      */
2845     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2846         return CP_ACCESS_TRAP;
2847     }
2848     return CP_ACCESS_OK;
2849 }
2850 
2851 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2852  * Page D4-1736 (DDI0487A.b)
2853  */
2854 
2855 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2856                                     uint64_t value)
2857 {
2858     ARMCPU *cpu = arm_env_get_cpu(env);
2859     CPUState *cs = CPU(cpu);
2860 
2861     if (arm_is_secure_below_el3(env)) {
2862         tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2863     } else {
2864         tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2865     }
2866 }
2867 
2868 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2869                                       uint64_t value)
2870 {
2871     bool sec = arm_is_secure_below_el3(env);
2872     CPUState *other_cs;
2873 
2874     CPU_FOREACH(other_cs) {
2875         if (sec) {
2876             tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2877         } else {
2878             tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2879                                 ARMMMUIdx_S12NSE0, -1);
2880         }
2881     }
2882 }
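/* The *IS (inner shareable) TLBI forms broadcast: CPU_FOREACH visits every
 * vCPU, including the caller, so the pattern shared by the _is_ helpers in
 * this file is simply
 *
 *     CPU_FOREACH(other_cs) {
 *         tlb_flush_by_mmuidx(other_cs, <MMU indexes for this regime>, -1);
 *     }
 *
 * whereas the non-IS forms flush only the issuing vCPU's TLB.
 */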
2883 
2884 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2885                                   uint64_t value)
2886 {
2887     /* Note that the 'ALL' scope must invalidate both stage 1 and
2888      * stage 2 translations, whereas most other scopes only invalidate
2889      * stage 1 translations.
2890      */
2891     ARMCPU *cpu = arm_env_get_cpu(env);
2892     CPUState *cs = CPU(cpu);
2893 
2894     if (arm_is_secure_below_el3(env)) {
2895         tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2896     } else {
2897         if (arm_feature(env, ARM_FEATURE_EL2)) {
2898             tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2899                                 ARMMMUIdx_S2NS, -1);
2900         } else {
2901             tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2902         }
2903     }
2904 }
2905 
2906 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2907                                   uint64_t value)
2908 {
2909     ARMCPU *cpu = arm_env_get_cpu(env);
2910     CPUState *cs = CPU(cpu);
2911 
2912     tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
2913 }
2914 
2915 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2916                                   uint64_t value)
2917 {
2918     ARMCPU *cpu = arm_env_get_cpu(env);
2919     CPUState *cs = CPU(cpu);
2920 
2921     tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
2922 }
2923 
2924 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2925                                     uint64_t value)
2926 {
2927     /* Note that the 'ALL' scope must invalidate both stage 1 and
2928      * stage 2 translations, whereas most other scopes only invalidate
2929      * stage 1 translations.
2930      */
2931     bool sec = arm_is_secure_below_el3(env);
2932     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2933     CPUState *other_cs;
2934 
2935     CPU_FOREACH(other_cs) {
2936         if (sec) {
2937             tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2938         } else if (has_el2) {
2939             tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2940                                 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
2941         } else {
2942             tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2943                                 ARMMMUIdx_S12NSE0, -1);
2944         }
2945     }
2946 }
2947 
2948 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2949                                     uint64_t value)
2950 {
2951     CPUState *other_cs;
2952 
2953     CPU_FOREACH(other_cs) {
2954         tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
2955     }
2956 }
2957 
2958 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2959                                     uint64_t value)
2960 {
2961     CPUState *other_cs;
2962 
2963     CPU_FOREACH(other_cs) {
2964         tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
2965     }
2966 }
2967 
2968 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2969                                  uint64_t value)
2970 {
2971     /* Invalidate by VA, EL1&0 (AArch64 version).
2972      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
2973      * since we don't support flush-for-specific-ASID-only or
2974      * flush-last-level-only.
2975      */
2976     ARMCPU *cpu = arm_env_get_cpu(env);
2977     CPUState *cs = CPU(cpu);
2978     uint64_t pageaddr = sextract64(value << 12, 0, 56);
2979 
2980     if (arm_is_secure_below_el3(env)) {
2981         tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
2982                                  ARMMMUIdx_S1SE0, -1);
2983     } else {
2984         tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
2985                                  ARMMMUIdx_S12NSE0, -1);
2986     }
2987 }
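/* The TLBI-by-VA payload carries VA[55:12] in value[43:0], so the
 * sextract64(value << 12, 0, 56) above shifts the field back into place
 * and sign-extends bit 55 to rebuild a canonical address.  For the
 * upper-half page VA 0xffffffffffff0000:
 *
 *     value                          == 0x00000ffffffffff0
 *     value << 12                    == 0x00ffffffffff0000
 *     sextract64(value << 12, 0, 56) == 0xffffffffffff0000
 */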
2988 
2989 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2990                                  uint64_t value)
2991 {
2992     /* Invalidate by VA, EL2
2993      * Currently handles both VAE2 and VALE2, since we don't support
2994      * flush-last-level-only.
2995      */
2996     ARMCPU *cpu = arm_env_get_cpu(env);
2997     CPUState *cs = CPU(cpu);
2998     uint64_t pageaddr = sextract64(value << 12, 0, 56);
2999 
3000     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
3001 }
3002 
3003 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3004                                  uint64_t value)
3005 {
3006     /* Invalidate by VA, EL3
3007      * Currently handles both VAE3 and VALE3, since we don't support
3008      * flush-last-level-only.
3009      */
3010     ARMCPU *cpu = arm_env_get_cpu(env);
3011     CPUState *cs = CPU(cpu);
3012     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3013 
3014     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
3015 }
3016 
3017 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3018                                    uint64_t value)
3019 {
3020     bool sec = arm_is_secure_below_el3(env);
3021     CPUState *other_cs;
3022     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3023 
3024     CPU_FOREACH(other_cs) {
3025         if (sec) {
3026             tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
3027                                      ARMMMUIdx_S1SE0, -1);
3028         } else {
3029             tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
3030                                      ARMMMUIdx_S12NSE0, -1);
3031         }
3032     }
3033 }
3034 
3035 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3036                                    uint64_t value)
3037 {
3038     CPUState *other_cs;
3039     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3040 
3041     CPU_FOREACH(other_cs) {
3042         tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
3043     }
3044 }
3045 
3046 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3047                                    uint64_t value)
3048 {
3049     CPUState *other_cs;
3050     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3051 
3052     CPU_FOREACH(other_cs) {
3053         tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
3054     }
3055 }
3056 
3057 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3058                                     uint64_t value)
3059 {
3060     /* Invalidate by IPA. This has to invalidate any structures that
3061      * contain only stage 2 translation information, but does not need
3062      * to apply to structures that contain combined stage 1 and stage 2
3063      * translation information.
3064      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3065      */
3066     ARMCPU *cpu = arm_env_get_cpu(env);
3067     CPUState *cs = CPU(cpu);
3068     uint64_t pageaddr;
3069 
3070     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3071         return;
3072     }
3073 
3074     pageaddr = sextract64(value << 12, 0, 48);
3075 
3076     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
3077 }
3078 
3079 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3080                                       uint64_t value)
3081 {
3082     CPUState *other_cs;
3083     uint64_t pageaddr;
3084 
3085     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3086         return;
3087     }
3088 
3089     pageaddr = sextract64(value << 12, 0, 48);
3090 
3091     CPU_FOREACH(other_cs) {
3092         tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
3093     }
3094 }
3095 
3096 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3097                                       bool isread)
3098 {
3099     /* We don't implement EL2, so the only control on DC ZVA is the
3100      * bit in the SCTLR which can prohibit access for EL0.
3101      */
3102     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3103         return CP_ACCESS_TRAP;
3104     }
3105     return CP_ACCESS_OK;
3106 }
3107 
3108 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3109 {
3110     ARMCPU *cpu = arm_env_get_cpu(env);
3111     int dzp_bit = 1 << 4;
3112 
3113     /* DZP indicates whether DC ZVA access is allowed */
3114     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3115         dzp_bit = 0;
3116     }
3117     return cpu->dcz_blocksize | dzp_bit;
3118 }
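/* DCZID_EL0 layout: bits [3:0] are log2 of the block size in words and
 * bit 4 is DZP ("prohibited").  With the common dcz_blocksize of 4
 * (2^4 words == 64 bytes) this returns 0x4 when DC ZVA is usable, and
 * 0x14 when read at EL0 with SCTLR_EL1.DZE clear.
 */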
3119 
3120 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3121                                     bool isread)
3122 {
3123     if (!(env->pstate & PSTATE_SP)) {
3124         /* Access to SP_EL0 is undefined if it's being used as
3125          * the stack pointer.
3126          */
3127         return CP_ACCESS_TRAP_UNCATEGORIZED;
3128     }
3129     return CP_ACCESS_OK;
3130 }
3131 
3132 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3133 {
3134     return env->pstate & PSTATE_SP;
3135 }
3136 
3137 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3138 {
3139     update_spsel(env, val);
3140 }
3141 
3142 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3143                         uint64_t value)
3144 {
3145     ARMCPU *cpu = arm_env_get_cpu(env);
3146 
3147     if (raw_read(env, ri) == value) {
3148         /* Skip the TLB flush if nothing actually changed; Linux likes
3149          * to do a lot of pointless SCTLR writes.
3150          */
3151         return;
3152     }
3153 
3154     raw_write(env, ri, value);
3155     /* ??? Lots of these bits are not implemented.  */
3156     /* This may enable/disable the MMU, so do a TLB flush.  */
3157     tlb_flush(CPU(cpu));
3158 }
3159 
3160 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3161                                      bool isread)
3162 {
3163     if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3164         return CP_ACCESS_TRAP_FP_EL2;
3165     }
3166     if (env->cp15.cptr_el[3] & CPTR_TFP) {
3167         return CP_ACCESS_TRAP_FP_EL3;
3168     }
3169     return CP_ACCESS_OK;
3170 }
3171 
3172 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3173                        uint64_t value)
3174 {
3175     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3176 }
3177 
3178 static const ARMCPRegInfo v8_cp_reginfo[] = {
3179     /* Minimal set of EL0-visible registers. This will need to be expanded
3180      * significantly for system emulation of AArch64 CPUs.
3181      */
3182     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3183       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3184       .access = PL0_RW, .type = ARM_CP_NZCV },
3185     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3186       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3187       .type = ARM_CP_NO_RAW,
3188       .access = PL0_RW, .accessfn = aa64_daif_access,
3189       .fieldoffset = offsetof(CPUARMState, daif),
3190       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3191     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3192       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3193       .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3194     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3195       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3196       .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3197     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3198       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3199       .access = PL0_R, .type = ARM_CP_NO_RAW,
3200       .readfn = aa64_dczid_read },
3201     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3202       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3203       .access = PL0_W, .type = ARM_CP_DC_ZVA,
3204 #ifndef CONFIG_USER_ONLY
3205       /* Avoid overhead of an access check that always passes in user-mode */
3206       .accessfn = aa64_zva_access,
3207 #endif
3208     },
3209     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3210       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3211       .access = PL1_R, .type = ARM_CP_CURRENTEL },
3212     /* Cache ops: all NOPs since we don't emulate caches */
3213     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3214       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3215       .access = PL1_W, .type = ARM_CP_NOP },
3216     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3217       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3218       .access = PL1_W, .type = ARM_CP_NOP },
3219     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3220       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3221       .access = PL0_W, .type = ARM_CP_NOP,
3222       .accessfn = aa64_cacheop_access },
3223     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3224       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3225       .access = PL1_W, .type = ARM_CP_NOP },
3226     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3227       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3228       .access = PL1_W, .type = ARM_CP_NOP },
3229     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3230       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3231       .access = PL0_W, .type = ARM_CP_NOP,
3232       .accessfn = aa64_cacheop_access },
3233     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3234       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3235       .access = PL1_W, .type = ARM_CP_NOP },
3236     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3237       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3238       .access = PL0_W, .type = ARM_CP_NOP,
3239       .accessfn = aa64_cacheop_access },
3240     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3241       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3242       .access = PL0_W, .type = ARM_CP_NOP,
3243       .accessfn = aa64_cacheop_access },
3244     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3245       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3246       .access = PL1_W, .type = ARM_CP_NOP },
3247     /* TLBI operations */
3248     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3249       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3250       .access = PL1_W, .type = ARM_CP_NO_RAW,
3251       .writefn = tlbi_aa64_vmalle1is_write },
3252     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3253       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3254       .access = PL1_W, .type = ARM_CP_NO_RAW,
3255       .writefn = tlbi_aa64_vae1is_write },
3256     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3257       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3258       .access = PL1_W, .type = ARM_CP_NO_RAW,
3259       .writefn = tlbi_aa64_vmalle1is_write },
3260     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3261       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3262       .access = PL1_W, .type = ARM_CP_NO_RAW,
3263       .writefn = tlbi_aa64_vae1is_write },
3264     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3265       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3266       .access = PL1_W, .type = ARM_CP_NO_RAW,
3267       .writefn = tlbi_aa64_vae1is_write },
3268     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3269       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3270       .access = PL1_W, .type = ARM_CP_NO_RAW,
3271       .writefn = tlbi_aa64_vae1is_write },
3272     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3273       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3274       .access = PL1_W, .type = ARM_CP_NO_RAW,
3275       .writefn = tlbi_aa64_vmalle1_write },
3276     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3277       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3278       .access = PL1_W, .type = ARM_CP_NO_RAW,
3279       .writefn = tlbi_aa64_vae1_write },
3280     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3281       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3282       .access = PL1_W, .type = ARM_CP_NO_RAW,
3283       .writefn = tlbi_aa64_vmalle1_write },
3284     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3285       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3286       .access = PL1_W, .type = ARM_CP_NO_RAW,
3287       .writefn = tlbi_aa64_vae1_write },
3288     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3289       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3290       .access = PL1_W, .type = ARM_CP_NO_RAW,
3291       .writefn = tlbi_aa64_vae1_write },
3292     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3293       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3294       .access = PL1_W, .type = ARM_CP_NO_RAW,
3295       .writefn = tlbi_aa64_vae1_write },
3296     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3297       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3298       .access = PL2_W, .type = ARM_CP_NO_RAW,
3299       .writefn = tlbi_aa64_ipas2e1is_write },
3300     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3301       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3302       .access = PL2_W, .type = ARM_CP_NO_RAW,
3303       .writefn = tlbi_aa64_ipas2e1is_write },
3304     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3305       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3306       .access = PL2_W, .type = ARM_CP_NO_RAW,
3307       .writefn = tlbi_aa64_alle1is_write },
3308     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3309       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3310       .access = PL2_W, .type = ARM_CP_NO_RAW,
3311       .writefn = tlbi_aa64_alle1is_write },
3312     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3313       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3314       .access = PL2_W, .type = ARM_CP_NO_RAW,
3315       .writefn = tlbi_aa64_ipas2e1_write },
3316     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3317       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3318       .access = PL2_W, .type = ARM_CP_NO_RAW,
3319       .writefn = tlbi_aa64_ipas2e1_write },
3320     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3321       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3322       .access = PL2_W, .type = ARM_CP_NO_RAW,
3323       .writefn = tlbi_aa64_alle1_write },
3324     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3325       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3326       .access = PL2_W, .type = ARM_CP_NO_RAW,
3327       .writefn = tlbi_aa64_alle1_write },
3328 #ifndef CONFIG_USER_ONLY
3329     /* 64 bit address translation operations */
3330     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3331       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3332       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3333     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3334       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3335       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3336     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3337       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3338       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3339     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3340       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3341       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3342     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3343       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3344       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3345     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3346       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3347       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3348     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3349       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3350       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3351     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3352       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3353       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3354     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3355     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3356       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3357       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3358     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3359       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3360       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3361     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3362       .type = ARM_CP_ALIAS,
3363       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3364       .access = PL1_RW, .resetvalue = 0,
3365       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3366       .writefn = par_write },
3367 #endif
3368     /* TLB invalidate last level of translation table walk */
3369     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3370       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3371     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3372       .type = ARM_CP_NO_RAW, .access = PL1_W,
3373       .writefn = tlbimvaa_is_write },
3374     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3375       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3376     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3377       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3378     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3379       .type = ARM_CP_NO_RAW, .access = PL2_W,
3380       .writefn = tlbimva_hyp_write },
3381     { .name = "TLBIMVALHIS",
3382       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3383       .type = ARM_CP_NO_RAW, .access = PL2_W,
3384       .writefn = tlbimva_hyp_is_write },
3385     { .name = "TLBIIPAS2",
3386       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3387       .type = ARM_CP_NO_RAW, .access = PL2_W,
3388       .writefn = tlbiipas2_write },
3389     { .name = "TLBIIPAS2IS",
3390       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3391       .type = ARM_CP_NO_RAW, .access = PL2_W,
3392       .writefn = tlbiipas2_is_write },
3393     { .name = "TLBIIPAS2L",
3394       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3395       .type = ARM_CP_NO_RAW, .access = PL2_W,
3396       .writefn = tlbiipas2_write },
3397     { .name = "TLBIIPAS2LIS",
3398       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3399       .type = ARM_CP_NO_RAW, .access = PL2_W,
3400       .writefn = tlbiipas2_is_write },
3401     /* 32 bit cache operations */
3402     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3403       .type = ARM_CP_NOP, .access = PL1_W },
3404     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3405       .type = ARM_CP_NOP, .access = PL1_W },
3406     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3407       .type = ARM_CP_NOP, .access = PL1_W },
3408     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3409       .type = ARM_CP_NOP, .access = PL1_W },
3410     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3411       .type = ARM_CP_NOP, .access = PL1_W },
3412     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3413       .type = ARM_CP_NOP, .access = PL1_W },
3414     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3415       .type = ARM_CP_NOP, .access = PL1_W },
3416     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3417       .type = ARM_CP_NOP, .access = PL1_W },
3418     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3419       .type = ARM_CP_NOP, .access = PL1_W },
3420     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3421       .type = ARM_CP_NOP, .access = PL1_W },
3422     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3423       .type = ARM_CP_NOP, .access = PL1_W },
3424     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3425       .type = ARM_CP_NOP, .access = PL1_W },
3426     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3427       .type = ARM_CP_NOP, .access = PL1_W },
3428     /* MMU Domain access control / MPU write buffer control */
3429     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3430       .access = PL1_RW, .resetvalue = 0,
3431       .writefn = dacr_write, .raw_writefn = raw_write,
3432       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3433                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3434     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3435       .type = ARM_CP_ALIAS,
3436       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3437       .access = PL1_RW,
3438       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3439     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3440       .type = ARM_CP_ALIAS,
3441       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3442       .access = PL1_RW,
3443       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3444     /* We rely on the access checks not allowing the guest to write to the
3445      * state field when SPSel indicates that it's being used as the stack
3446      * pointer.
3447      */
3448     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3449       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3450       .access = PL1_RW, .accessfn = sp_el0_access,
3451       .type = ARM_CP_ALIAS,
3452       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3453     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3454       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3455       .access = PL2_RW, .type = ARM_CP_ALIAS,
3456       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3457     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3458       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3459       .type = ARM_CP_NO_RAW,
3460       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3461     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3462       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3463       .type = ARM_CP_ALIAS,
3464       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3465       .access = PL2_RW, .accessfn = fpexc32_access },
3466     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3467       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3468       .access = PL2_RW, .resetvalue = 0,
3469       .writefn = dacr_write, .raw_writefn = raw_write,
3470       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3471     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3472       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3473       .access = PL2_RW, .resetvalue = 0,
3474       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3475     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3476       .type = ARM_CP_ALIAS,
3477       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3478       .access = PL2_RW,
3479       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3480     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3481       .type = ARM_CP_ALIAS,
3482       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3483       .access = PL2_RW,
3484       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3485     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3486       .type = ARM_CP_ALIAS,
3487       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3488       .access = PL2_RW,
3489       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3490     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3491       .type = ARM_CP_ALIAS,
3492       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3493       .access = PL2_RW,
3494       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3495     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3496       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3497       .resetvalue = 0,
3498       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3499     { .name = "SDCR", .type = ARM_CP_ALIAS,
3500       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3501       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3502       .writefn = sdcr_write,
3503       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3504     REGINFO_SENTINEL
3505 };
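/* Each AArch64 entry above is keyed by its (op0, op1, CRn, CRm, op2) MSR/MRS
 * encoding; the NZCV entry (opc0 = 3, opc1 = 3, crn = 4, crm = 2, opc2 = 0),
 * for instance, is the register the architecture names S3_3_C4_C2_0, which
 * is why it needs no .fieldoffset: ARM_CP_NZCV tells the translator to fold
 * the access into the cached NZCV flags instead.
 */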
3506 
3507 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3508 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3509     { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3510       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3511       .access = PL2_RW,
3512       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3513     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3514       .type = ARM_CP_NO_RAW,
3515       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3516       .access = PL2_RW,
3517       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3518     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3519       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3520       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3521     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3522       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3523       .access = PL2_RW, .type = ARM_CP_CONST,
3524       .resetvalue = 0 },
3525     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3526       .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3527       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3528     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3529       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3530       .access = PL2_RW, .type = ARM_CP_CONST,
3531       .resetvalue = 0 },
3532     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3533       .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3534       .access = PL2_RW, .type = ARM_CP_CONST,
3535       .resetvalue = 0 },
3536     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3537       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3538       .access = PL2_RW, .type = ARM_CP_CONST,
3539       .resetvalue = 0 },
3540     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3541       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3542       .access = PL2_RW, .type = ARM_CP_CONST,
3543       .resetvalue = 0 },
3544     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3545       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3546       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3547     { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3548       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3549       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3550       .type = ARM_CP_CONST, .resetvalue = 0 },
3551     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3552       .cp = 15, .opc1 = 6, .crm = 2,
3553       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3554       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3555     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3556       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3557       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3558     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3559       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3560       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3561     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3562       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3563       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3564     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3565       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3566       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3567     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3568       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3569       .resetvalue = 0 },
3570     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3571       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3572       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3573     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3574       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3575       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3576     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3577       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3578       .resetvalue = 0 },
3579     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3580       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3581       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3582     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3583       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3584       .resetvalue = 0 },
3585     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3586       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3587       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3588     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3589       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3590       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3591     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3592       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3593       .access = PL2_RW, .accessfn = access_tda,
3594       .type = ARM_CP_CONST, .resetvalue = 0 },
3595     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3596       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3597       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3598       .type = ARM_CP_CONST, .resetvalue = 0 },
3599     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3600       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3601       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3602     REGINFO_SENTINEL
3603 };
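/* With EL3 but no EL2, the architecture still requires these _EL2 registers
 * to be accessible from EL3; they simply behave as RAZ/WI.  Most entries get
 * that from ARM_CP_CONST with a zero resetvalue; the two with explicit hooks
 * rely on helpers equivalent to
 *
 *     static uint64_t arm_cp_read_zero(CPUARMState *env,
 *                                      const ARMCPRegInfo *ri)
 *     {
 *         return 0;
 *     }
 *
 * paired with a write hook that discards the value.
 */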
3604 
3605 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3606 {
3607     ARMCPU *cpu = arm_env_get_cpu(env);
3608     uint64_t valid_mask = HCR_MASK;
3609 
3610     if (arm_feature(env, ARM_FEATURE_EL3)) {
3611         valid_mask &= ~HCR_HCD;
3612     } else {
3613         valid_mask &= ~HCR_TSC;
3614     }
3615 
3616     /* Clear RES0 bits.  */
3617     value &= valid_mask;
3618 
3619     /* These bits change the MMU setup:
3620      * HCR_VM enables stage 2 translation
3621      * HCR_PTW forbids certain page-table setups
3622      * HCR_DC disables stage 1 and enables stage 2 translation
3623      */
3624     if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3625         tlb_flush(CPU(cpu));
3626     }
3627     raw_write(env, ri, value);
3628 }
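/* Example of the masking and flush logic above, on a CPU without EL3:
 *
 *     hcr_write(env, ri, HCR_VM | HCR_TSC);
 *
 * stores HCR_VM but drops HCR_TSC (removed from valid_mask when EL3 is
 * absent), and because HCR_VM changed from its reset value of 0 the stage 2
 * MMU setup changed too, so the whole TLB is flushed before the raw value
 * is written back.
 */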
3629 
3630 static const ARMCPRegInfo el2_cp_reginfo[] = {
3631     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3632       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3633       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3634       .writefn = hcr_write },
3635     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3636       .type = ARM_CP_ALIAS,
3637       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3638       .access = PL2_RW,
3639       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3640     { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3641       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3642       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3643     { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3644       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3645       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3646     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3647       .type = ARM_CP_ALIAS,
3648       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3649       .access = PL2_RW,
3650       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3651     { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3652       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3653       .access = PL2_RW, .writefn = vbar_write,
3654       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3655       .resetvalue = 0 },
3656     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3657       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3658       .access = PL3_RW, .type = ARM_CP_ALIAS,
3659       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3660     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3661       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3662       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3663       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3664     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3665       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3666       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3667       .resetvalue = 0 },
3668     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3669       .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3670       .access = PL2_RW, .type = ARM_CP_ALIAS,
3671       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3672     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3673       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3674       .access = PL2_RW, .type = ARM_CP_CONST,
3675       .resetvalue = 0 },
3676     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3677     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3678       .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3679       .access = PL2_RW, .type = ARM_CP_CONST,
3680       .resetvalue = 0 },
3681     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3682       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3683       .access = PL2_RW, .type = ARM_CP_CONST,
3684       .resetvalue = 0 },
3685     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3686       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3687       .access = PL2_RW, .type = ARM_CP_CONST,
3688       .resetvalue = 0 },
3689     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3690       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3691       .access = PL2_RW,
3692       /* no .writefn needed as this can't cause an ASID change;
3693        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3694        */
3695       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3696     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3697       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3698       .type = ARM_CP_ALIAS,
3699       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3700       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3701     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3702       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3703       .access = PL2_RW,
3704       /* no .writefn needed as this can't cause an ASID change;
3705        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3706        */
3707       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3708     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3709       .cp = 15, .opc1 = 6, .crm = 2,
3710       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3711       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3712       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3713       .writefn = vttbr_write },
3714     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3715       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3716       .access = PL2_RW, .writefn = vttbr_write,
3717       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3718     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3719       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3720       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3721       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3722     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3723       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3724       .access = PL2_RW, .resetvalue = 0,
3725       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3726     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3727       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3728       .access = PL2_RW, .resetvalue = 0,
3729       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3730     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3731       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3732       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3733     { .name = "TLBIALLNSNH",
3734       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3735       .type = ARM_CP_NO_RAW, .access = PL2_W,
3736       .writefn = tlbiall_nsnh_write },
3737     { .name = "TLBIALLNSNHIS",
3738       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3739       .type = ARM_CP_NO_RAW, .access = PL2_W,
3740       .writefn = tlbiall_nsnh_is_write },
3741     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3742       .type = ARM_CP_NO_RAW, .access = PL2_W,
3743       .writefn = tlbiall_hyp_write },
3744     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3745       .type = ARM_CP_NO_RAW, .access = PL2_W,
3746       .writefn = tlbiall_hyp_is_write },
3747     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3748       .type = ARM_CP_NO_RAW, .access = PL2_W,
3749       .writefn = tlbimva_hyp_write },
3750     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3751       .type = ARM_CP_NO_RAW, .access = PL2_W,
3752       .writefn = tlbimva_hyp_is_write },
3753     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3754       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3755       .type = ARM_CP_NO_RAW, .access = PL2_W,
3756       .writefn = tlbi_aa64_alle2_write },
3757     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3758       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3759       .type = ARM_CP_NO_RAW, .access = PL2_W,
3760       .writefn = tlbi_aa64_vae2_write },
3761     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3762       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3763       .access = PL2_W, .type = ARM_CP_NO_RAW,
3764       .writefn = tlbi_aa64_vae2_write },
3765     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3766       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3767       .access = PL2_W, .type = ARM_CP_NO_RAW,
3768       .writefn = tlbi_aa64_alle2is_write },
3769     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3770       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3771       .type = ARM_CP_NO_RAW, .access = PL2_W,
3772       .writefn = tlbi_aa64_vae2is_write },
3773     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3774       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3775       .access = PL2_W, .type = ARM_CP_NO_RAW,
3776       .writefn = tlbi_aa64_vae2is_write },
3777 #ifndef CONFIG_USER_ONLY
3778     /* Unlike the other EL2-related AT operations, these must
3779      * UNDEF from EL3 if EL2 is not implemented, which is why we
3780      * define them here rather than with the rest of the AT ops.
3781      */
3782     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3783       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3784       .access = PL2_W, .accessfn = at_s1e2_access,
3785       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3786     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3787       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3788       .access = PL2_W, .accessfn = at_s1e2_access,
3789       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3790     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3791      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3792      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3793      * to behave as if SCR.NS was 1.
3794      */
3795     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3796       .access = PL2_W,
3797       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3798     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3799       .access = PL2_W,
3800       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3801     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3802       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3803       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
3804        * reset values as IMPDEF. We choose to reset to 3 to comply with
3805        * both ARMv7 and ARMv8.
3806        */
3807       .access = PL2_RW, .resetvalue = 3,
3808       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3809     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3810       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3811       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3812       .writefn = gt_cntvoff_write,
3813       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3814     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3815       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3816       .writefn = gt_cntvoff_write,
3817       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3818     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3819       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3820       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3821       .type = ARM_CP_IO, .access = PL2_RW,
3822       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3823     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3824       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3825       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3826       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3827     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3828       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3829       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
3830       .resetfn = gt_hyp_timer_reset,
3831       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
3832     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3833       .type = ARM_CP_IO,
3834       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3835       .access = PL2_RW,
3836       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
3837       .resetvalue = 0,
3838       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
3839 #endif
3840     /* The only field of MDCR_EL2 that has a defined architectural reset value
3841      * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
3842      * don't implement any PMU event counters, so using zero as a reset
3843      * value for MDCR_EL2 is okay.
3844      */
3845     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3846       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3847       .access = PL2_RW, .resetvalue = 0,
3848       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
3849     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
3850       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3851       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3852       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3853     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
3854       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3855       .access = PL2_RW,
3856       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3857     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3858       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3859       .access = PL2_RW,
3860       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
3861     REGINFO_SENTINEL
3862 };
3863 
3864 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
3865                                    bool isread)
3866 {
3867     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
3868      * At Secure EL1 it traps to EL3.
3869      */
3870     if (arm_current_el(env) == 3) {
3871         return CP_ACCESS_OK;
3872     }
3873     if (arm_is_secure_below_el3(env)) {
3874         return CP_ACCESS_TRAP_EL3;
3875     }
3876     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
3877     if (isread) {
3878         return CP_ACCESS_OK;
3879     }
3880     return CP_ACCESS_TRAP_UNCATEGORIZED;
3881 }
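/* In tabular form:
 *
 *     caller            read            write
 *     EL3               OK              OK
 *     Secure EL1        trap to EL3     trap to EL3
 *     NS EL1 / NS EL2   OK              UNDEF
 */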
3882 
3883 static const ARMCPRegInfo el3_cp_reginfo[] = {
3884     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
3885       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
3886       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
3887       .resetvalue = 0, .writefn = scr_write },
3888     { .name = "SCR",  .type = ARM_CP_ALIAS,
3889       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
3890       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3891       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
3892       .writefn = scr_write },
3893     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
3894       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
3895       .access = PL3_RW, .resetvalue = 0,
3896       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
3897     { .name = "SDER",
3898       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
3899       .access = PL3_RW, .resetvalue = 0,
3900       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
3901     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
3902       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3903       .writefn = vbar_write, .resetvalue = 0,
3904       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
3905     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
3906       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
3907       .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3908       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
3909     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
3910       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
3911       .access = PL3_RW,
3912       /* no .writefn needed as this can't cause an ASID change;
3913        * we must provide a .raw_writefn and .resetfn because we handle
3914        * reset and migration for the AArch32 TTBCR(S), which might be
3915        * using mask and base_mask.
3916        */
3917       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
3918       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
3919     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
3920       .type = ARM_CP_ALIAS,
3921       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
3922       .access = PL3_RW,
3923       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
3924     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
3925       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
3926       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
3927     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
3928       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
3929       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
3930     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
3931       .type = ARM_CP_ALIAS,
3932       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
3933       .access = PL3_RW,
3934       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
3935     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
3936       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
3937       .access = PL3_RW, .writefn = vbar_write,
3938       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
3939       .resetvalue = 0 },
3940     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
3941       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
3942       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
3943       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
3944     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
3945       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
3946       .access = PL3_RW, .resetvalue = 0,
3947       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
3948     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
3949       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
3950       .access = PL3_RW, .type = ARM_CP_CONST,
3951       .resetvalue = 0 },
3952     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
3953       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
3954       .access = PL3_RW, .type = ARM_CP_CONST,
3955       .resetvalue = 0 },
3956     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
3957       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
3958       .access = PL3_RW, .type = ARM_CP_CONST,
3959       .resetvalue = 0 },
3960     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
3961       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
3962       .access = PL3_W, .type = ARM_CP_NO_RAW,
3963       .writefn = tlbi_aa64_alle3is_write },
3964     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
3965       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
3966       .access = PL3_W, .type = ARM_CP_NO_RAW,
3967       .writefn = tlbi_aa64_vae3is_write },
3968     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
3969       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
3970       .access = PL3_W, .type = ARM_CP_NO_RAW,
3971       .writefn = tlbi_aa64_vae3is_write },
3972     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
3973       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
3974       .access = PL3_W, .type = ARM_CP_NO_RAW,
3975       .writefn = tlbi_aa64_alle3_write },
3976     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
3977       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
3978       .access = PL3_W, .type = ARM_CP_NO_RAW,
3979       .writefn = tlbi_aa64_vae3_write },
3980     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
3981       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
3982       .access = PL3_W, .type = ARM_CP_NO_RAW,
3983       .writefn = tlbi_aa64_vae3_write },
3984     REGINFO_SENTINEL
3985 };
3986 
3987 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3988                                      bool isread)
3989 {
3990     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
3991      * but the AArch32 CTR has its own reginfo struct)
3992      */
3993     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
3994         return CP_ACCESS_TRAP;
3995     }
3996     return CP_ACCESS_OK;
3997 }
3998 
3999 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4000                         uint64_t value)
4001 {
4002     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4003      * read via a bit in OSLSR_EL1.
4004      */
4005     int oslock;
4006 
4007     if (ri->state == ARM_CP_STATE_AA32) {
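             /* In AArch32 the OS Lock is set by writing the architected
              * key value 0xC5ACCE55 to OSLAR.
              */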
4008         oslock = (value == 0xC5ACCE55);
4009     } else {
4010         oslock = value & 1;
4011     }
4012 
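         /* OSLSR_EL1 bit 1 is the OSLK (OS Lock status) bit; deposit32
          * updates just that bit and leaves the rest of OSLSR_EL1 unchanged.
          */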
4013     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4014 }
4015 
4016 static const ARMCPRegInfo debug_cp_reginfo[] = {
4017     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
4018      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
4019      * unlike DBGDRAR it is never accessible from EL0.
4020      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4021      * accessor.
4022      */
4023     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4024       .access = PL0_R, .accessfn = access_tdra,
4025       .type = ARM_CP_CONST, .resetvalue = 0 },
4026     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4027       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4028       .access = PL1_R, .accessfn = access_tdra,
4029       .type = ARM_CP_CONST, .resetvalue = 0 },
4030     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4031       .access = PL0_R, .accessfn = access_tdra,
4032       .type = ARM_CP_CONST, .resetvalue = 0 },
4033     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4034     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4035       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4036       .access = PL1_RW, .accessfn = access_tda,
4037       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4038       .resetvalue = 0 },
4039     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4040      * We don't implement the configurable EL0 access.
4041      */
4042     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4043       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4044       .type = ARM_CP_ALIAS,
4045       .access = PL1_R, .accessfn = access_tda,
4046       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4047     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4048       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4049       .access = PL1_W, .type = ARM_CP_NO_RAW,
4050       .accessfn = access_tdosa,
4051       .writefn = oslar_write },
4052     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4053       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4054       .access = PL1_R, .resetvalue = 10,
4055       .accessfn = access_tdosa,
4056       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4057     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4058     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4059       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4060       .access = PL1_RW, .accessfn = access_tdosa,
4061       .type = ARM_CP_NOP },
4062     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4063      * implement vector catch debug events yet.
4064      */
4065     { .name = "DBGVCR",
4066       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4067       .access = PL1_RW, .accessfn = access_tda,
4068       .type = ARM_CP_NOP },
4069     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
4070      * to save and restore a 32-bit guest's DBGVCR)
4071      */
4072     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
4073       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
4074       .access = PL2_RW, .accessfn = access_tda,
4075       .type = ARM_CP_NOP },
4076     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
4077      * Channel but Linux may try to access this register. The 32-bit
4078      * alias is DBGDCCINT.
4079      */
4080     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
4081       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4082       .access = PL1_RW, .accessfn = access_tda,
4083       .type = ARM_CP_NOP },
4084     REGINFO_SENTINEL
4085 };
4086 
4087 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4088     /* 64 bit access versions of the (dummy) debug registers */
4089     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4090       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4091     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4092       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4093     REGINFO_SENTINEL
4094 };
4095 
4096 void hw_watchpoint_update(ARMCPU *cpu, int n)
4097 {
4098     CPUARMState *env = &cpu->env;
4099     vaddr len = 0;
4100     vaddr wvr = env->cp15.dbgwvr[n];
4101     uint64_t wcr = env->cp15.dbgwcr[n];
4102     int mask;
4103     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4104 
4105     if (env->cpu_watchpoint[n]) {
4106         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4107         env->cpu_watchpoint[n] = NULL;
4108     }
4109 
4110     if (!extract64(wcr, 0, 1)) {
4111         /* E bit clear : watchpoint disabled */
4112         return;
4113     }
4114 
4115     switch (extract64(wcr, 3, 2)) {
4116     case 0:
4117         /* LSC 00 is reserved and must behave as if the wp is disabled */
4118         return;
4119     case 1:
4120         flags |= BP_MEM_READ;
4121         break;
4122     case 2:
4123         flags |= BP_MEM_WRITE;
4124         break;
4125     case 3:
4126         flags |= BP_MEM_ACCESS;
4127         break;
4128     }
4129 
4130     /* Attempts to use both MASK and BAS fields simultaneously are
4131      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4132      * thus generating a watchpoint for every byte in the masked region.
4133      */
4134     mask = extract64(wcr, 24, 5); /* MASK is a 5 bit field, bits [28:24] */
4135     if (mask == 1 || mask == 2) {
4136         /* Reserved values of MASK; we must act as if the mask value was
4137          * some non-reserved value, or as if the watchpoint were disabled.
4138          * We choose the latter.
4139          */
4140         return;
4141     } else if (mask) {
4142         /* Watchpoint covers an aligned area up to 2GB in size */
4143         len = 1ULL << mask;
4144         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4145          * whether the watchpoint fires when the unmasked bits match; we opt
4146          * to generate the exceptions.
4147          */
4148         wvr &= ~(len - 1);
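             /* Worked example: MASK = 3 gives len = 1 << 3 = 8, and wvr is
              * aligned down to an 8-byte boundary, watching those 8 bytes.
              */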
4149     } else {
4150         /* Watchpoint covers bytes defined by the byte address select bits */
4151         int bas = extract64(wcr, 5, 8);
4152         int basstart;
4153 
4154         if (bas == 0) {
4155             /* This must act as if the watchpoint is disabled */
4156             return;
4157         }
4158 
4159         if (extract64(wvr, 2, 1)) {
4160             /* Deprecated case of an address that is only 4-byte aligned.
4161              * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
4162              */
4163             bas &= 0xf;
4164         }
4165         /* The BAS bits are supposed to be programmed to indicate a contiguous
4166          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4167          * we fire for each byte in the word/doubleword addressed by the WVR.
4168          * We choose to ignore any non-zero bits after the first range of 1s.
4169          */
4170         basstart = ctz32(bas);
4171         len = cto32(bas >> basstart);
4172         wvr += basstart;
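             /* Worked example: BAS = 0b00111100 gives basstart = ctz32(0x3c)
              * = 2 and len = cto32(0xf) = 4, so the watchpoint covers the
              * four bytes at WVR+2 .. WVR+5.
              */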
4173     }
4174 
4175     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4176                           &env->cpu_watchpoint[n]);
4177 }
4178 
4179 void hw_watchpoint_update_all(ARMCPU *cpu)
4180 {
4181     int i;
4182     CPUARMState *env = &cpu->env;
4183 
4184     /* Completely clear out existing QEMU watchpoints and our array, to
4185      * avoid possible stale entries following migration load.
4186      */
4187     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4188     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4189 
4190     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4191         hw_watchpoint_update(cpu, i);
4192     }
4193 }
4194 
4195 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4196                          uint64_t value)
4197 {
4198     ARMCPU *cpu = arm_env_get_cpu(env);
4199     int i = ri->crm;
4200 
4201     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4202      * register reads and behaves as if values written are sign extended.
4203      * Bits [1:0] are RES0.
4204      */
4205     value = sextract64(value, 0, 49) & ~3ULL;
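         /* Example: a write of 0x0001000000000007 (bit 48 set) is stored as
          * 0xffff000000000004: bits [63:49] copy bit [48], bits [1:0] clear.
          */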
4206 
4207     raw_write(env, ri, value);
4208     hw_watchpoint_update(cpu, i);
4209 }
4210 
4211 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4212                          uint64_t value)
4213 {
4214     ARMCPU *cpu = arm_env_get_cpu(env);
4215     int i = ri->crm;
4216 
4217     raw_write(env, ri, value);
4218     hw_watchpoint_update(cpu, i);
4219 }
4220 
4221 void hw_breakpoint_update(ARMCPU *cpu, int n)
4222 {
4223     CPUARMState *env = &cpu->env;
4224     uint64_t bvr = env->cp15.dbgbvr[n];
4225     uint64_t bcr = env->cp15.dbgbcr[n];
4226     vaddr addr;
4227     int bt;
4228     int flags = BP_CPU;
4229 
4230     if (env->cpu_breakpoint[n]) {
4231         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4232         env->cpu_breakpoint[n] = NULL;
4233     }
4234 
4235     if (!extract64(bcr, 0, 1)) {
4236         /* E bit clear : breakpoint disabled */
4237         return;
4238     }
4239 
4240     bt = extract64(bcr, 20, 4);
4241 
4242     switch (bt) {
4243     case 4: /* unlinked address mismatch (reserved if AArch64) */
4244     case 5: /* linked address mismatch (reserved if AArch64) */
4245         qemu_log_mask(LOG_UNIMP,
4246                       "arm: address mismatch breakpoint types not implemented\n");
4247         return;
4248     case 0: /* unlinked address match */
4249     case 1: /* linked address match */
4250     {
4251         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4252          * we behave as if the register was sign extended. Bits [1:0] are
4253          * RES0. The BAS field is used to allow setting breakpoints on 16
4254          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4255          * a bp will fire if the addresses covered by the bp and the addresses
4256          * covered by the insn overlap but the insn doesn't start at the
4257          * start of the bp address range. We choose to require the insn and
4258          * the bp to have the same address. The constraints on writing to
4259          * BAS enforced in dbgbcr_write mean we have only four cases:
4260          *  0b0000  => no breakpoint
4261          *  0b0011  => breakpoint on addr
4262          *  0b1100  => breakpoint on addr + 2
4263          *  0b1111  => breakpoint on addr
4264          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4265          */
4266         int bas = extract64(bcr, 5, 4);
4267         addr = sextract64(bvr, 0, 49) & ~3ULL;
4268         if (bas == 0) {
4269             return;
4270         }
4271         if (bas == 0xc) {
4272             addr += 2;
4273         }
4274         break;
4275     }
4276     case 2: /* unlinked context ID match */
4277     case 8: /* unlinked VMID match (reserved if no EL2) */
4278     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4279         qemu_log_mask(LOG_UNIMP,
4280                       "arm: unlinked context breakpoint types not implemented\n");
4281         return;
4282     case 9: /* linked VMID match (reserved if no EL2) */
4283     case 11: /* linked context ID and VMID match (reserved if no EL2) */
4284     case 3: /* linked context ID match */
4285     default:
4286         /* We must generate no events for Linked context matches (unless
4287          * they are linked to by some other bp/wp, which is handled in
4288          * updates for the linking bp/wp). We choose to also generate no events
4289          * for reserved values.
4290          */
4291         return;
4292     }
4293 
4294     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4295 }
4296 
4297 void hw_breakpoint_update_all(ARMCPU *cpu)
4298 {
4299     int i;
4300     CPUARMState *env = &cpu->env;
4301 
4302     /* Completely clear out existing QEMU breakpoints and our array, to
4303      * avoid possible stale entries following migration load.
4304      */
4305     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4306     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4307 
4308     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4309         hw_breakpoint_update(cpu, i);
4310     }
4311 }
4312 
4313 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4314                          uint64_t value)
4315 {
4316     ARMCPU *cpu = arm_env_get_cpu(env);
4317     int i = ri->crm;
4318 
4319     raw_write(env, ri, value);
4320     hw_breakpoint_update(cpu, i);
4321 }
4322 
4323 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4324                          uint64_t value)
4325 {
4326     ARMCPU *cpu = arm_env_get_cpu(env);
4327     int i = ri->crm;
4328 
4329     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4330      * copy of BAS[0].
4331      */
4332     value = deposit64(value, 6, 1, extract64(value, 5, 1));
4333     value = deposit64(value, 8, 1, extract64(value, 7, 1));
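         /* Example: a guest write of BAS = 0b0101 is stored as BAS = 0b1111,
          * so only the four BAS patterns handled in hw_breakpoint_update can
          * ever be observed in the register.
          */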
4334 
4335     raw_write(env, ri, value);
4336     hw_breakpoint_update(cpu, i);
4337 }
4338 
4339 static void define_debug_regs(ARMCPU *cpu)
4340 {
4341     /* Define v7 and v8 architectural debug registers.
4342      * These are just dummy implementations for now.
4343      */
4344     int i;
4345     int wrps, brps, ctx_cmps;
4346     ARMCPRegInfo dbgdidr = {
4347         .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4348         .access = PL0_R, .accessfn = access_tda,
4349         .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4350     };
4351 
4352     /* Note that all these register fields hold "number of Xs minus 1". */
4353     brps = extract32(cpu->dbgdidr, 24, 4);
4354     wrps = extract32(cpu->dbgdidr, 28, 4);
4355     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4356 
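         /* Example: a BRPs field of 5 means 6 breakpoint register pairs,
          * hence the "i < brps + 1" loop bounds below.
          */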
4357     assert(ctx_cmps <= brps);
4358 
4359     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4360      * of the debug registers such as number of breakpoints;
4361      * check that if they both exist then they agree.
4362      */
4363     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4364         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4365         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4366         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4367     }
4368 
4369     define_one_arm_cp_reg(cpu, &dbgdidr);
4370     define_arm_cp_regs(cpu, debug_cp_reginfo);
4371 
4372     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4373         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4374     }
4375 
4376     for (i = 0; i < brps + 1; i++) {
4377         ARMCPRegInfo dbgregs[] = {
4378             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4379               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4380               .access = PL1_RW, .accessfn = access_tda,
4381               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4382               .writefn = dbgbvr_write, .raw_writefn = raw_write
4383             },
4384             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4385               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4386               .access = PL1_RW, .accessfn = access_tda,
4387               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4388               .writefn = dbgbcr_write, .raw_writefn = raw_write
4389             },
4390             REGINFO_SENTINEL
4391         };
4392         define_arm_cp_regs(cpu, dbgregs);
4393     }
4394 
4395     for (i = 0; i < wrps + 1; i++) {
4396         ARMCPRegInfo dbgregs[] = {
4397             { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4398               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4399               .access = PL1_RW, .accessfn = access_tda,
4400               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4401               .writefn = dbgwvr_write, .raw_writefn = raw_write
4402             },
4403             { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4404               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4405               .access = PL1_RW, .accessfn = access_tda,
4406               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4407               .writefn = dbgwcr_write, .raw_writefn = raw_write
4408             },
4409             REGINFO_SENTINEL
4410         };
4411         define_arm_cp_regs(cpu, dbgregs);
4412     }
4413 }
4414 
4415 void register_cp_regs_for_features(ARMCPU *cpu)
4416 {
4417     /* Register all the coprocessor registers based on feature bits */
4418     CPUARMState *env = &cpu->env;
4419     if (arm_feature(env, ARM_FEATURE_M)) {
4420         /* M profile has no coprocessor registers */
4421         return;
4422     }
4423 
4424     define_arm_cp_regs(cpu, cp_reginfo);
4425     if (!arm_feature(env, ARM_FEATURE_V8)) {
4426         /* Must go early as it is full of wildcards that may be
4427          * overridden by later definitions.
4428          */
4429         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4430     }
4431 
4432     if (arm_feature(env, ARM_FEATURE_V6)) {
4433         /* The ID registers all have impdef reset values */
4434         ARMCPRegInfo v6_idregs[] = {
4435             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4436               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4437               .access = PL1_R, .type = ARM_CP_CONST,
4438               .resetvalue = cpu->id_pfr0 },
4439             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4440               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4441               .access = PL1_R, .type = ARM_CP_CONST,
4442               .resetvalue = cpu->id_pfr1 },
4443             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4444               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4445               .access = PL1_R, .type = ARM_CP_CONST,
4446               .resetvalue = cpu->id_dfr0 },
4447             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4448               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4449               .access = PL1_R, .type = ARM_CP_CONST,
4450               .resetvalue = cpu->id_afr0 },
4451             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4452               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4453               .access = PL1_R, .type = ARM_CP_CONST,
4454               .resetvalue = cpu->id_mmfr0 },
4455             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4456               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4457               .access = PL1_R, .type = ARM_CP_CONST,
4458               .resetvalue = cpu->id_mmfr1 },
4459             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4460               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4461               .access = PL1_R, .type = ARM_CP_CONST,
4462               .resetvalue = cpu->id_mmfr2 },
4463             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4464               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4465               .access = PL1_R, .type = ARM_CP_CONST,
4466               .resetvalue = cpu->id_mmfr3 },
4467             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4468               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4469               .access = PL1_R, .type = ARM_CP_CONST,
4470               .resetvalue = cpu->id_isar0 },
4471             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4472               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4473               .access = PL1_R, .type = ARM_CP_CONST,
4474               .resetvalue = cpu->id_isar1 },
4475             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4476               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4477               .access = PL1_R, .type = ARM_CP_CONST,
4478               .resetvalue = cpu->id_isar2 },
4479             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4480               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4481               .access = PL1_R, .type = ARM_CP_CONST,
4482               .resetvalue = cpu->id_isar3 },
4483             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4484               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4485               .access = PL1_R, .type = ARM_CP_CONST,
4486               .resetvalue = cpu->id_isar4 },
4487             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4488               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4489               .access = PL1_R, .type = ARM_CP_CONST,
4490               .resetvalue = cpu->id_isar5 },
4491             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4492               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4493               .access = PL1_R, .type = ARM_CP_CONST,
4494               .resetvalue = cpu->id_mmfr4 },
4495             /* 7 is as yet unallocated and must RAZ */
4496             { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4497               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4498               .access = PL1_R, .type = ARM_CP_CONST,
4499               .resetvalue = 0 },
4500             REGINFO_SENTINEL
4501         };
4502         define_arm_cp_regs(cpu, v6_idregs);
4503         define_arm_cp_regs(cpu, v6_cp_reginfo);
4504     } else {
4505         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4506     }
4507     if (arm_feature(env, ARM_FEATURE_V6K)) {
4508         define_arm_cp_regs(cpu, v6k_cp_reginfo);
4509     }
4510     if (arm_feature(env, ARM_FEATURE_V7MP) &&
4511         !arm_feature(env, ARM_FEATURE_MPU)) {
4512         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4513     }
4514     if (arm_feature(env, ARM_FEATURE_V7)) {
4515         /* v7 performance monitor control register: same implementor
4516          * field as main ID register, and we implement only the cycle
4517          * count register.
4518          */
4519 #ifndef CONFIG_USER_ONLY
4520         ARMCPRegInfo pmcr = {
4521             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4522             .access = PL0_RW,
4523             .type = ARM_CP_IO | ARM_CP_ALIAS,
4524             .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4525             .accessfn = pmreg_access, .writefn = pmcr_write,
4526             .raw_writefn = raw_write,
4527         };
4528         ARMCPRegInfo pmcr64 = {
4529             .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4530             .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4531             .access = PL0_RW, .accessfn = pmreg_access,
4532             .type = ARM_CP_IO,
4533             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4534             .resetvalue = cpu->midr & 0xff000000,
4535             .writefn = pmcr_write, .raw_writefn = raw_write,
4536         };
4537         define_one_arm_cp_reg(cpu, &pmcr);
4538         define_one_arm_cp_reg(cpu, &pmcr64);
4539 #endif
4540         ARMCPRegInfo clidr = {
4541             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4542             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4543             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4544         };
4545         define_one_arm_cp_reg(cpu, &clidr);
4546         define_arm_cp_regs(cpu, v7_cp_reginfo);
4547         define_debug_regs(cpu);
4548     } else {
4549         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4550     }
4551     if (arm_feature(env, ARM_FEATURE_V8)) {
4552         /* AArch64 ID registers, which all have impdef reset values.
4553          * Note that within the ID register ranges the unused slots
4554          * must all RAZ, not UNDEF; future architecture versions may
4555          * define new registers here.
4556          */
4557         ARMCPRegInfo v8_idregs[] = {
4558             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4559               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4560               .access = PL1_R, .type = ARM_CP_CONST,
4561               .resetvalue = cpu->id_aa64pfr0 },
4562             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4563               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4564               .access = PL1_R, .type = ARM_CP_CONST,
4565               .resetvalue = cpu->id_aa64pfr1},
4566             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4567               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4568               .access = PL1_R, .type = ARM_CP_CONST,
4569               .resetvalue = 0 },
4570             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4571               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4572               .access = PL1_R, .type = ARM_CP_CONST,
4573               .resetvalue = 0 },
4574             { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4575               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4576               .access = PL1_R, .type = ARM_CP_CONST,
4577               .resetvalue = 0 },
4578             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4579               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4580               .access = PL1_R, .type = ARM_CP_CONST,
4581               .resetvalue = 0 },
4582             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4583               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4584               .access = PL1_R, .type = ARM_CP_CONST,
4585               .resetvalue = 0 },
4586             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4587               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4588               .access = PL1_R, .type = ARM_CP_CONST,
4589               .resetvalue = 0 },
4590             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4591               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4592               .access = PL1_R, .type = ARM_CP_CONST,
4593               /* We mask out the PMUVer field, because we don't currently
4594                * implement the PMU. Not advertising it prevents the guest
4595                * from trying to use it and getting UNDEFs on registers we
4596                * don't implement.
4597                */
4598               .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
4599             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4600               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4601               .access = PL1_R, .type = ARM_CP_CONST,
4602               .resetvalue = cpu->id_aa64dfr1 },
4603             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4604               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4605               .access = PL1_R, .type = ARM_CP_CONST,
4606               .resetvalue = 0 },
4607             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4608               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4609               .access = PL1_R, .type = ARM_CP_CONST,
4610               .resetvalue = 0 },
4611             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4612               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4613               .access = PL1_R, .type = ARM_CP_CONST,
4614               .resetvalue = cpu->id_aa64afr0 },
4615             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4616               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4617               .access = PL1_R, .type = ARM_CP_CONST,
4618               .resetvalue = cpu->id_aa64afr1 },
4619             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4620               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4621               .access = PL1_R, .type = ARM_CP_CONST,
4622               .resetvalue = 0 },
4623             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4624               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4625               .access = PL1_R, .type = ARM_CP_CONST,
4626               .resetvalue = 0 },
4627             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4628               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4629               .access = PL1_R, .type = ARM_CP_CONST,
4630               .resetvalue = cpu->id_aa64isar0 },
4631             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4632               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4633               .access = PL1_R, .type = ARM_CP_CONST,
4634               .resetvalue = cpu->id_aa64isar1 },
4635             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4636               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4637               .access = PL1_R, .type = ARM_CP_CONST,
4638               .resetvalue = 0 },
4639             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4640               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4641               .access = PL1_R, .type = ARM_CP_CONST,
4642               .resetvalue = 0 },
4643             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4644               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4645               .access = PL1_R, .type = ARM_CP_CONST,
4646               .resetvalue = 0 },
4647             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4648               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4649               .access = PL1_R, .type = ARM_CP_CONST,
4650               .resetvalue = 0 },
4651             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4652               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4653               .access = PL1_R, .type = ARM_CP_CONST,
4654               .resetvalue = 0 },
4655             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4656               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4657               .access = PL1_R, .type = ARM_CP_CONST,
4658               .resetvalue = 0 },
4659             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4660               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4661               .access = PL1_R, .type = ARM_CP_CONST,
4662               .resetvalue = cpu->id_aa64mmfr0 },
4663             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4664               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4665               .access = PL1_R, .type = ARM_CP_CONST,
4666               .resetvalue = cpu->id_aa64mmfr1 },
4667             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4668               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4669               .access = PL1_R, .type = ARM_CP_CONST,
4670               .resetvalue = 0 },
4671             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4672               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4673               .access = PL1_R, .type = ARM_CP_CONST,
4674               .resetvalue = 0 },
4675             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4676               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4677               .access = PL1_R, .type = ARM_CP_CONST,
4678               .resetvalue = 0 },
4679             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4680               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4681               .access = PL1_R, .type = ARM_CP_CONST,
4682               .resetvalue = 0 },
4683             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4684               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4685               .access = PL1_R, .type = ARM_CP_CONST,
4686               .resetvalue = 0 },
4687             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4688               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4689               .access = PL1_R, .type = ARM_CP_CONST,
4690               .resetvalue = 0 },
4691             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4692               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4693               .access = PL1_R, .type = ARM_CP_CONST,
4694               .resetvalue = cpu->mvfr0 },
4695             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4696               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4697               .access = PL1_R, .type = ARM_CP_CONST,
4698               .resetvalue = cpu->mvfr1 },
4699             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4700               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4701               .access = PL1_R, .type = ARM_CP_CONST,
4702               .resetvalue = cpu->mvfr2 },
4703             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4704               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4705               .access = PL1_R, .type = ARM_CP_CONST,
4706               .resetvalue = 0 },
4707             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4708               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4709               .access = PL1_R, .type = ARM_CP_CONST,
4710               .resetvalue = 0 },
4711             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4712               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4713               .access = PL1_R, .type = ARM_CP_CONST,
4714               .resetvalue = 0 },
4715             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4716               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4717               .access = PL1_R, .type = ARM_CP_CONST,
4718               .resetvalue = 0 },
4719             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4720               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4721               .access = PL1_R, .type = ARM_CP_CONST,
4722               .resetvalue = 0 },
4723             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4724               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4725               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4726               .resetvalue = cpu->pmceid0 },
4727             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4728               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4729               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4730               .resetvalue = cpu->pmceid0 },
4731             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4732               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4733               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4734               .resetvalue = cpu->pmceid1 },
4735             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
4736               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
4737               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4738               .resetvalue = cpu->pmceid1 },
4739             REGINFO_SENTINEL
4740         };
4741         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
4742         if (!arm_feature(env, ARM_FEATURE_EL3) &&
4743             !arm_feature(env, ARM_FEATURE_EL2)) {
4744             ARMCPRegInfo rvbar = {
4745                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
4746                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4747                 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
4748             };
4749             define_one_arm_cp_reg(cpu, &rvbar);
4750         }
4751         define_arm_cp_regs(cpu, v8_idregs);
4752         define_arm_cp_regs(cpu, v8_cp_reginfo);
4753     }
4754     if (arm_feature(env, ARM_FEATURE_EL2)) {
4755         uint64_t vmpidr_def = mpidr_read_val(env);
4756         ARMCPRegInfo vpidr_regs[] = {
4757             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
4758               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4759               .access = PL2_RW, .accessfn = access_el3_aa32ns,
4760               .resetvalue = cpu->midr,
4761               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4762             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
4763               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4764               .access = PL2_RW, .resetvalue = cpu->midr,
4765               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4766             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
4767               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4768               .access = PL2_RW, .accessfn = access_el3_aa32ns,
4769               .resetvalue = vmpidr_def,
4770               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4771             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
4772               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4773               .access = PL2_RW,
4774               .resetvalue = vmpidr_def,
4775               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4776             REGINFO_SENTINEL
4777         };
4778         define_arm_cp_regs(cpu, vpidr_regs);
4779         define_arm_cp_regs(cpu, el2_cp_reginfo);
4780         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
4781         if (!arm_feature(env, ARM_FEATURE_EL3)) {
4782             ARMCPRegInfo rvbar = {
4783                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
4784                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4785                 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
4786             };
4787             define_one_arm_cp_reg(cpu, &rvbar);
4788         }
4789     } else {
4790         /* If EL2 is missing but higher ELs are enabled, we need to
4791          * register the no_el2 reginfos.
4792          */
4793         if (arm_feature(env, ARM_FEATURE_EL3)) {
4794             /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
4795              * of MIDR_EL1 and MPIDR_EL1.
4796              */
4797             ARMCPRegInfo vpidr_regs[] = {
4798                 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4799                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4800                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4801                   .type = ARM_CP_CONST, .resetvalue = cpu->midr,
4802                   .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4803                 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4804                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4805                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4806                   .type = ARM_CP_NO_RAW,
4807                   .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
4808                 REGINFO_SENTINEL
4809             };
4810             define_arm_cp_regs(cpu, vpidr_regs);
4811             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
4812         }
4813     }
4814     if (arm_feature(env, ARM_FEATURE_EL3)) {
4815         define_arm_cp_regs(cpu, el3_cp_reginfo);
4816         ARMCPRegInfo el3_regs[] = {
4817             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
4818               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
4819               .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
4820             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
4821               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
4822               .access = PL3_RW,
4823               .raw_writefn = raw_write, .writefn = sctlr_write,
4824               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
4825               .resetvalue = cpu->reset_sctlr },
4826             REGINFO_SENTINEL
4827         };
4828 
4829         define_arm_cp_regs(cpu, el3_regs);
4830     }
4831     /* The behaviour of NSACR is sufficiently various that we don't
4832      * try to describe it in a single reginfo:
4833      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
4834      *     reads as constant 0xc00 from NS EL1 and NS EL2
4835      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
4836      *  if v7 without EL3, register doesn't exist
4837      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
4838      */
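         /* The constant 0xc00 sets NSACR.{CP11,CP10}, i.e. FP/Neon accesses
          * are permitted for the Non-secure world.
          */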
4839     if (arm_feature(env, ARM_FEATURE_EL3)) {
4840         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
4841             ARMCPRegInfo nsacr = {
4842                 .name = "NSACR", .type = ARM_CP_CONST,
4843                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4844                 .access = PL1_RW, .accessfn = nsacr_access,
4845                 .resetvalue = 0xc00
4846             };
4847             define_one_arm_cp_reg(cpu, &nsacr);
4848         } else {
4849             ARMCPRegInfo nsacr = {
4850                 .name = "NSACR",
4851                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4852                 .access = PL3_RW | PL1_R,
4853                 .resetvalue = 0,
4854                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
4855             };
4856             define_one_arm_cp_reg(cpu, &nsacr);
4857         }
4858     } else {
4859         if (arm_feature(env, ARM_FEATURE_V8)) {
4860             ARMCPRegInfo nsacr = {
4861                 .name = "NSACR", .type = ARM_CP_CONST,
4862                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4863                 .access = PL1_R,
4864                 .resetvalue = 0xc00
4865             };
4866             define_one_arm_cp_reg(cpu, &nsacr);
4867         }
4868     }
4869 
4870     if (arm_feature(env, ARM_FEATURE_MPU)) {
4871         if (arm_feature(env, ARM_FEATURE_V6)) {
4872             /* PMSAv6 not implemented */
4873             assert(arm_feature(env, ARM_FEATURE_V7));
4874             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4875             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
4876         } else {
4877             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
4878         }
4879     } else {
4880         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4881         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
4882     }
4883     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
4884         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
4885     }
4886     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
4887         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
4888     }
4889     if (arm_feature(env, ARM_FEATURE_VAPA)) {
4890         define_arm_cp_regs(cpu, vapa_cp_reginfo);
4891     }
4892     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
4893         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
4894     }
4895     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
4896         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
4897     }
4898     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
4899         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
4900     }
4901     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
4902         define_arm_cp_regs(cpu, omap_cp_reginfo);
4903     }
4904     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
4905         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
4906     }
4907     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4908         define_arm_cp_regs(cpu, xscale_cp_reginfo);
4909     }
4910     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
4911         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
4912     }
4913     if (arm_feature(env, ARM_FEATURE_LPAE)) {
4914         define_arm_cp_regs(cpu, lpae_cp_reginfo);
4915     }
4916     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
4917      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
4918      * be read-only (ie write causes UNDEF exception).
4919      */
4920     {
4921         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
4922             /* Pre-v8 MIDR space.
4923              * Note that the MIDR isn't a simple constant register because
4924              * of the TI925 behaviour where writes to another register can
4925              * cause the MIDR value to change.
4926              *
4927              * Unimplemented registers in the c15 0 0 0 space default to
4928              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
4929              * and friends override accordingly.
4930              */
4931             { .name = "MIDR",
4932               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
4933               .access = PL1_R, .resetvalue = cpu->midr,
4934               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
4935               .readfn = midr_read,
4936               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
4937               .type = ARM_CP_OVERRIDE },
4938             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
4939             { .name = "DUMMY",
4940               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
4941               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4942             { .name = "DUMMY",
4943               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
4944               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4945             { .name = "DUMMY",
4946               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
4947               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4948             { .name = "DUMMY",
4949               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
4950               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4951             { .name = "DUMMY",
4952               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
4953               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4954             REGINFO_SENTINEL
4955         };
4956         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
4957             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
4958               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
4959               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
4960               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
4961               .readfn = midr_read },
4962             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
4963             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
4964               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
4965               .access = PL1_R, .resetvalue = cpu->midr },
4966             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
4967               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
4968               .access = PL1_R, .resetvalue = cpu->midr },
4969             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
4970               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
4971               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
4972             REGINFO_SENTINEL
4973         };
4974         ARMCPRegInfo id_cp_reginfo[] = {
4975             /* These are common to v8 and pre-v8 */
4976             { .name = "CTR",
4977               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
4978               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
4979             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
4980               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
4981               .access = PL0_R, .accessfn = ctr_el0_access,
4982               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
4983             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
4984             { .name = "TCMTR",
4985               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
4986               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4987             REGINFO_SENTINEL
4988         };
4989         /* TLBTR is specific to VMSA */
4990         ARMCPRegInfo id_tlbtr_reginfo = {
4991               .name = "TLBTR",
4992               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
4993               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
4994         };
4995         /* MPUIR is specific to PMSA V6+ */
4996         ARMCPRegInfo id_mpuir_reginfo = {
4997               .name = "MPUIR",
4998               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
4999               .access = PL1_R, .type = ARM_CP_CONST,
5000               .resetvalue = cpu->pmsav7_dregion << 8
5001         };
5002         ARMCPRegInfo crn0_wi_reginfo = {
5003             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5004             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5005             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5006         };
5007         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5008             arm_feature(env, ARM_FEATURE_STRONGARM)) {
5009             ARMCPRegInfo *r;
5010             /* Register the blanket "writes ignored" value first to cover the
5011              * whole space. Then update the specific ID registers to allow write
5012              * access, so that they ignore writes rather than causing them to
5013              * UNDEF.
5014              */
5015             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5016             for (r = id_pre_v8_midr_cp_reginfo;
5017                  r->type != ARM_CP_SENTINEL; r++) {
5018                 r->access = PL1_RW;
5019             }
5020             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5021                 r->access = PL1_RW;
5022             }
5023             id_tlbtr_reginfo.access = PL1_RW;
5024             id_mpuir_reginfo.access = PL1_RW;
5025         }
5026         if (arm_feature(env, ARM_FEATURE_V8)) {
5027             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5028         } else {
5029             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5030         }
5031         define_arm_cp_regs(cpu, id_cp_reginfo);
5032         if (!arm_feature(env, ARM_FEATURE_MPU)) {
5033             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5034         } else if (arm_feature(env, ARM_FEATURE_V7)) {
5035             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5036         }
5037     }
5038 
5039     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5040         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5041     }
5042 
5043     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5044         ARMCPRegInfo auxcr_reginfo[] = {
5045             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5046               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5047               .access = PL1_RW, .type = ARM_CP_CONST,
5048               .resetvalue = cpu->reset_auxcr },
5049             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5050               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5051               .access = PL2_RW, .type = ARM_CP_CONST,
5052               .resetvalue = 0 },
5053             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5054               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5055               .access = PL3_RW, .type = ARM_CP_CONST,
5056               .resetvalue = 0 },
5057             REGINFO_SENTINEL
5058         };
5059         define_arm_cp_regs(cpu, auxcr_reginfo);
5060     }
5061 
5062     if (arm_feature(env, ARM_FEATURE_CBAR)) {
5063         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5064             /* 32 bit view is CBAR[31:18], zeroes in [17:12], CBAR[43:32]. */
5065             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5066                 | extract64(cpu->reset_cbar, 32, 12);
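                 /* Example: reset_cbar = 0x0000004240000000 yields
                  * cbar32 = 0x40000042.
                  */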
5067             ARMCPRegInfo cbar_reginfo[] = {
5068                 { .name = "CBAR",
5069                   .type = ARM_CP_CONST,
5070                   .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5071                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
5072                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5073                   .type = ARM_CP_CONST,
5074                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5075                   .access = PL1_R, .resetvalue = cbar32 },
5076                 REGINFO_SENTINEL
5077             };
5078             /* We don't currently implement an r/w 64 bit CBAR */
5079             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5080             define_arm_cp_regs(cpu, cbar_reginfo);
5081         } else {
5082             ARMCPRegInfo cbar = {
5083                 .name = "CBAR",
5084                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5085                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5086                 .fieldoffset = offsetof(CPUARMState,
5087                                         cp15.c15_config_base_address)
5088             };
5089             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5090                 cbar.access = PL1_R;
5091                 cbar.fieldoffset = 0;
5092                 cbar.type = ARM_CP_CONST;
5093             }
5094             define_one_arm_cp_reg(cpu, &cbar);
5095         }
5096     }
5097 
5098     if (arm_feature(env, ARM_FEATURE_VBAR)) {
5099         ARMCPRegInfo vbar_cp_reginfo[] = {
5100             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5101               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5102               .access = PL1_RW, .writefn = vbar_write,
5103               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5104                                      offsetof(CPUARMState, cp15.vbar_ns) },
5105               .resetvalue = 0 },
5106             REGINFO_SENTINEL
5107         };
5108         define_arm_cp_regs(cpu, vbar_cp_reginfo);
5109     }
5110 
5111     /* Generic registers whose values depend on the implementation */
5112     {
5113         ARMCPRegInfo sctlr = {
5114             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5115             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5116             .access = PL1_RW,
5117             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5118                                    offsetof(CPUARMState, cp15.sctlr_ns) },
5119             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5120             .raw_writefn = raw_write,
5121         };
5122         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5123             /* Normally we would always end the TB on an SCTLR write, but Linux
5124              * arch/arm/mach-pxa/sleep.S expects two instructions following
5125              * an MMU enable to execute from cache.  Imitate this behaviour.
5126              */
5127             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5128         }
5129         define_one_arm_cp_reg(cpu, &sctlr);
5130     }
5131 }
5132 
5133 ARMCPU *cpu_arm_init(const char *cpu_model)
5134 {
5135     return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
5136 }
5137 
5138 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5139 {
5140     CPUState *cs = CPU(cpu);
5141     CPUARMState *env = &cpu->env;
5142 
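    /* The register counts below must match the number of <reg> elements
     * in the corresponding gdb-xml description; e.g. arm-vfp3.xml is 32
     * D registers plus FPSID/FPSCR/FPEXC (35), and arm-neon.xml adds 16
     * Q register aliases on top of that (51).
     */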
5143     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5144         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5145                                  aarch64_fpu_gdb_set_reg,
5146                                  34, "aarch64-fpu.xml", 0);
5147     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5148         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5149                                  51, "arm-neon.xml", 0);
5150     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5151         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5152                                  35, "arm-vfp3.xml", 0);
5153     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5154         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5155                                  19, "arm-vfp.xml", 0);
5156     }
5157 }
5158 
5159 /* Sort alphabetically by type name, except for "any". */
5160 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5161 {
5162     ObjectClass *class_a = (ObjectClass *)a;
5163     ObjectClass *class_b = (ObjectClass *)b;
5164     const char *name_a, *name_b;
5165 
5166     name_a = object_class_get_name(class_a);
5167     name_b = object_class_get_name(class_b);
5168     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5169         return 1;
5170     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5171         return -1;
5172     } else {
5173         return strcmp(name_a, name_b);
5174     }
5175 }
5176 
5177 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5178 {
5179     ObjectClass *oc = data;
5180     CPUListState *s = user_data;
5181     const char *typename;
5182     char *name;
5183 
5184     typename = object_class_get_name(oc);
5185     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5186     (*s->cpu_fprintf)(s->file, "  %s\n",
5187                       name);
5188     g_free(name);
5189 }
5190 
5191 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5192 {
5193     CPUListState s = {
5194         .file = f,
5195         .cpu_fprintf = cpu_fprintf,
5196     };
5197     GSList *list;
5198 
5199     list = object_class_get_list(TYPE_ARM_CPU, false);
5200     list = g_slist_sort(list, arm_cpu_list_compare);
5201     (*cpu_fprintf)(f, "Available CPUs:\n");
5202     g_slist_foreach(list, arm_cpu_list_entry, &s);
5203     g_slist_free(list);
5204 #ifdef CONFIG_KVM
5205     /* The 'host' CPU type is dynamically registered only if KVM is
5206      * enabled, so we have to special-case it here:
5207      */
5208     (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
5209 #endif
5210 }
5211 
5212 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5213 {
5214     ObjectClass *oc = data;
5215     CpuDefinitionInfoList **cpu_list = user_data;
5216     CpuDefinitionInfoList *entry;
5217     CpuDefinitionInfo *info;
5218     const char *typename;
5219 
5220     typename = object_class_get_name(oc);
5221     info = g_malloc0(sizeof(*info));
5222     info->name = g_strndup(typename,
5223                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
5224     info->q_typename = g_strdup(typename);
5225 
5226     entry = g_malloc0(sizeof(*entry));
5227     entry->value = info;
5228     entry->next = *cpu_list;
5229     *cpu_list = entry;
5230 }
5231 
5232 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5233 {
5234     CpuDefinitionInfoList *cpu_list = NULL;
5235     GSList *list;
5236 
5237     list = object_class_get_list(TYPE_ARM_CPU, false);
5238     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5239     g_slist_free(list);
5240 
5241     return cpu_list;
5242 }
5243 
5244 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5245                                    void *opaque, int state, int secstate,
5246                                    int crm, int opc1, int opc2)
5247 {
5248     /* Private utility function for define_one_arm_cp_reg_with_opaque():
5249      * add a single reginfo struct to the hash table.
5250      */
5251     uint32_t *key = g_new(uint32_t, 1);
5252     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5253     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5254     int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5255 
5256     /* Reset the secure state to the specific incoming state.  This is
5257      * necessary as the register may have been defined with both states.
5258      */
5259     r2->secure = secstate;
5260 
5261     if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5262         /* Register is banked (using both entries in array).
5263          * We overwrite fieldoffset here because the array is only used
5264          * to define banked registers; later only fieldoffset is used.
5265          */
5266         r2->fieldoffset = r->bank_fieldoffsets[ns];
5267     }
5268 
5269     if (state == ARM_CP_STATE_AA32) {
5270         if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5271             /* If the register is banked then we don't need to migrate or
5272              * reset the 32-bit instance in certain cases:
5273              *
5274              * 1) If the register has both 32-bit and 64-bit instances then we
5275              *    can count on the 64-bit instance taking care of the
5276              *    non-secure bank.
5277              * 2) If ARMv8 is enabled then we can count on a 64-bit version
5278              *    taking care of the secure bank.  This requires that separate
5279              *    32 and 64-bit definitions are provided.
5280              */
5281             if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5282                 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5283                 r2->type |= ARM_CP_ALIAS;
5284             }
5285         } else if ((secstate != r->secure) && !ns) {
5286             /* The register is not banked so we only want to allow migration of
5287              * the non-secure instance.
5288              */
5289             r2->type |= ARM_CP_ALIAS;
5290         }
5291 
5292         if (r->state == ARM_CP_STATE_BOTH) {
5293             /* We assume it is a cp15 register if the .cp field is left unset.
5294              */
5295             if (r2->cp == 0) {
5296                 r2->cp = 15;
5297             }
5298 
5299 #ifdef HOST_WORDS_BIGENDIAN
5300             if (r2->fieldoffset) {
5301                 r2->fieldoffset += sizeof(uint32_t);
5302             }
5303 #endif
5304         }
5305     }
5306     if (state == ARM_CP_STATE_AA64) {
5307         /* To allow abbreviation of ARMCPRegInfo
5308          * definitions, we treat cp == 0 as equivalent to
5309          * the value for "standard guest-visible sysreg".
5310          * STATE_BOTH definitions are also always "standard
5311          * sysreg" in their AArch64 view (the .cp value may
5312          * be non-zero for the benefit of the AArch32 view).
5313          */
5314         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5315             r2->cp = CP_REG_ARM64_SYSREG_CP;
5316         }
5317         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5318                                   r2->opc0, opc1, opc2);
5319     } else {
5320         *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5321     }
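    /* Note that ns participates in the AArch32 key encoding, so a register
     * defined for both security states occupies two distinct hashtable
     * slots, one per banked instance.
     */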
5322     if (opaque) {
5323         r2->opaque = opaque;
5324     }
5325     /* reginfo passed to helpers is correct for the actual access,
5326      * and is never ARM_CP_STATE_BOTH:
5327      */
5328     r2->state = state;
5329     /* Make sure reginfo passed to helpers for wildcarded regs
5330      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5331      */
5332     r2->crm = crm;
5333     r2->opc1 = opc1;
5334     r2->opc2 = opc2;
5335     /* By convention, for wildcarded registers only the first
5336      * entry is used for migration; the others are marked as
5337      * ALIAS so we don't try to transfer the register
5338      * multiple times. Special registers (ie NOP/WFI) are
5339      * never migratable and not even raw-accessible.
5340      */
5341     if ((r->type & ARM_CP_SPECIAL)) {
5342         r2->type |= ARM_CP_NO_RAW;
5343     }
5344     if (((r->crm == CP_ANY) && crm != 0) ||
5345         ((r->opc1 == CP_ANY) && opc1 != 0) ||
5346         ((r->opc2 == CP_ANY) && opc2 != 0)) {
5347         r2->type |= ARM_CP_ALIAS;
5348     }
5349 
5350     /* Check that raw accesses are either forbidden or handled. Note that
5351      * we can't assert this earlier because the setup of fieldoffset for
5352      * banked registers has to be done first.
5353      */
5354     if (!(r2->type & ARM_CP_NO_RAW)) {
5355         assert(!raw_accessors_invalid(r2));
5356     }
5357 
5358     /* Overriding of an existing definition must be explicitly
5359      * requested.
5360      */
5361     if (!(r->type & ARM_CP_OVERRIDE)) {
5362         ARMCPRegInfo *oldreg;
5363         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5364         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5365             fprintf(stderr, "Register redefined: cp=%d %d bit "
5366                     "crn=%d crm=%d opc1=%d opc2=%d, "
5367                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5368                     r2->crn, r2->crm, r2->opc1, r2->opc2,
5369                     oldreg->name, r2->name);
5370             g_assert_not_reached();
5371         }
5372     }
5373     g_hash_table_insert(cpu->cp_regs, key, r2);
5374 }
5375 
5376 
5377 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5378                                        const ARMCPRegInfo *r, void *opaque)
5379 {
5380     /* Define implementations of coprocessor registers.
5381      * We store these in a hashtable because typically
5382      * there are fewer than 150 registers in a space which
5383      * is 16*16*16*8*8 = 262144 in size.
5384      * Wildcarding is supported for the crm, opc1 and opc2 fields.
5385      * If a register is defined twice then the second definition is
5386      * used, so this can be used to define some generic registers and
5387      * then override them with implementation specific variations.
5388      * At least one of the original and the second definition should
5389      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5390      * against accidental use.
5391      *
5392      * The state field defines whether the register is to be
5393      * visible in the AArch32 or AArch64 execution state. If the
5394      * state is set to ARM_CP_STATE_BOTH then we synthesise a
5395      * reginfo structure for the AArch32 view, which sees the lower
5396      * 32 bits of the 64 bit register.
5397      *
5398      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5399      * be wildcarded. AArch64 registers are always considered to be 64
5400      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5401      * the register, if any.
5402      */
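    /* As an illustrative sketch (hypothetical reginfo, not one defined
     * here): a wildcarded definition such as
     *   { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
     *     .opc1 = CP_ANY, .opc2 = 0, .type = ARM_CP_NO_RAW,
     *     .writefn = some_tlbi_writefn }
     * expands below into one hashtable entry per (crm, opc1) pair, each
     * carrying the concrete values in place of CP_ANY.
     */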
5403     int crm, opc1, opc2, state;
5404     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5405     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5406     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5407     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5408     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5409     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5410     /* 64 bit registers have only CRm and Opc1 fields */
5411     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5412     /* op0 only exists in the AArch64 encodings */
5413     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5414     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5415     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5416     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5417      * encodes a minimum access level for the register. We roll this
5418      * runtime check into our general permission check code, so check
5419      * here that the reginfo's specified permissions are strict enough
5420      * to encompass the generic architectural permission check.
5421      */
5422     if (r->state != ARM_CP_STATE_AA32) {
5423         int mask = 0;
5424         switch (r->opc1) {
5425         case 0: case 1: case 2:
5426             /* min_EL EL1 */
5427             mask = PL1_RW;
5428             break;
5429         case 3:
5430             /* min_EL EL0 */
5431             mask = PL0_RW;
5432             break;
5433         case 4:
5434             /* min_EL EL2 */
5435             mask = PL2_RW;
5436             break;
5437         case 5:
5438             /* unallocated encoding, so not possible */
5439             assert(false);
5440             break;
5441         case 6:
5442             /* min_EL EL3 */
5443             mask = PL3_RW;
5444             break;
5445         case 7:
5446             /* min_EL EL1, secure mode only (we don't check the latter) */
5447             mask = PL1_RW;
5448             break;
5449         default:
5450             /* broken reginfo with out-of-range opc1 */
5451             assert(false);
5452             break;
5453         }
5454         /* assert our permissions are not too lax (stricter is fine) */
5455         assert((r->access & ~mask) == 0);
5456     }
5457 
5458     /* Check that the register definition has enough info to handle
5459      * reads and writes if they are permitted.
5460      */
5461     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
5462         if (r->access & PL3_R) {
5463             assert((r->fieldoffset ||
5464                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5465                    r->readfn);
5466         }
5467         if (r->access & PL3_W) {
5468             assert((r->fieldoffset ||
5469                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5470                    r->writefn);
5471         }
5472     }
5473     /* Bad type field probably means missing sentinel at end of reg list */
5474     assert(cptype_valid(r->type));
5475     for (crm = crmmin; crm <= crmmax; crm++) {
5476         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
5477             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
5478                 for (state = ARM_CP_STATE_AA32;
5479                      state <= ARM_CP_STATE_AA64; state++) {
5480                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
5481                         continue;
5482                     }
5483                     if (state == ARM_CP_STATE_AA32) {
5484                         /* Under AArch32 CP registers can be common
5485                          * (same for secure and non-secure world) or banked.
5486                          */
5487                         switch (r->secure) {
5488                         case ARM_CP_SECSTATE_S:
5489                         case ARM_CP_SECSTATE_NS:
5490                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5491                                                    r->secure, crm, opc1, opc2);
5492                             break;
5493                         default:
5494                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5495                                                    ARM_CP_SECSTATE_S,
5496                                                    crm, opc1, opc2);
5497                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5498                                                    ARM_CP_SECSTATE_NS,
5499                                                    crm, opc1, opc2);
5500                             break;
5501                         }
5502                     } else {
5503                         /* AArch64 registers get mapped to the non-secure
5504                          * instance of AArch32 */
5505                         add_cpreg_to_hashtable(cpu, r, opaque, state,
5506                                                ARM_CP_SECSTATE_NS,
5507                                                crm, opc1, opc2);
5508                     }
5509                 }
5510             }
5511         }
5512     }
5513 }
5514 
5515 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5516                                     const ARMCPRegInfo *regs, void *opaque)
5517 {
5518     /* Define a whole list of registers */
5519     const ARMCPRegInfo *r;
5520     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5521         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5522     }
5523 }
5524 
5525 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5526 {
5527     return g_hash_table_lookup(cpregs, &encoded_cp);
5528 }
5529 
5530 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
5531                          uint64_t value)
5532 {
5533     /* Helper coprocessor write function for write-ignore registers */
5534 }
5535 
5536 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
5537 {
5538     /* Helper coprocessor read function for read-as-zero registers */
5539     return 0;
5540 }
5541 
5542 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
5543 {
5544     /* Helper coprocessor reset function for do-nothing-on-reset registers */
5545 }
5546 
5547 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
5548 {
5549     /* Return true if it is not valid for us to switch to
5550      * this CPU mode (ie all the UNPREDICTABLE cases in
5551      * the ARM ARM CPSRWriteByInstr pseudocode).
5552      */
5553 
5554     /* Changes to or from Hyp via MSR and CPS are illegal. */
5555     if (write_type == CPSRWriteByInstr &&
5556         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
5557          mode == ARM_CPU_MODE_HYP)) {
5558         return 1;
5559     }
5560 
5561     switch (mode) {
5562     case ARM_CPU_MODE_USR:
5563         return 0;
5564     case ARM_CPU_MODE_SYS:
5565     case ARM_CPU_MODE_SVC:
5566     case ARM_CPU_MODE_ABT:
5567     case ARM_CPU_MODE_UND:
5568     case ARM_CPU_MODE_IRQ:
5569     case ARM_CPU_MODE_FIQ:
5570         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
5571          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
5572          */
5573         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
5574          * and CPS are treated as illegal mode changes.
5575          */
5576         if (write_type == CPSRWriteByInstr &&
5577             (env->cp15.hcr_el2 & HCR_TGE) &&
5578             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
5579             !arm_is_secure_below_el3(env)) {
5580             return 1;
5581         }
5582         return 0;
5583     case ARM_CPU_MODE_HYP:
5584         return !arm_feature(env, ARM_FEATURE_EL2)
5585             || arm_current_el(env) < 2 || arm_is_secure(env);
5586     case ARM_CPU_MODE_MON:
5587         return arm_current_el(env) < 3;
5588     default:
5589         return 1;
5590     }
5591 }
5592 
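/* The architected CPSR layout being reassembled here is: N=31 Z=30 C=29
 * V=28 Q=27 IT[1:0]=26:25 GE[3:0]=19:16 IT[7:2]=15:10 E=9 A=8 I=7 F=6
 * T=5 M[4:0]=4:0; QEMU keeps these as separate CPUARMState fields for
 * speed, and cpsr_read()/cpsr_write() pack and unpack them.
 */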
5593 uint32_t cpsr_read(CPUARMState *env)
5594 {
5595     int ZF;
5596     ZF = (env->ZF == 0);
5597     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5598         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5599         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5600         | ((env->condexec_bits & 0xfc) << 8)
5601         | (env->GE << 16) | (env->daif & CPSR_AIF);
5602 }
5603 
5604 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
5605                 CPSRWriteType write_type)
5606 {
5607     uint32_t changed_daif;
5608 
5609     if (mask & CPSR_NZCV) {
5610         env->ZF = (~val) & CPSR_Z;
5611         env->NF = val;
5612         env->CF = (val >> 29) & 1;
5613         env->VF = (val << 3) & 0x80000000;
5614     }
5615     if (mask & CPSR_Q)
5616         env->QF = ((val & CPSR_Q) != 0);
5617     if (mask & CPSR_T)
5618         env->thumb = ((val & CPSR_T) != 0);
5619     if (mask & CPSR_IT_0_1) {
5620         env->condexec_bits &= ~3;
5621         env->condexec_bits |= (val >> 25) & 3;
5622     }
5623     if (mask & CPSR_IT_2_7) {
5624         env->condexec_bits &= 3;
5625         env->condexec_bits |= (val >> 8) & 0xfc;
5626     }
5627     if (mask & CPSR_GE) {
5628         env->GE = (val >> 16) & 0xf;
5629     }
5630 
5631     /* In a V7 implementation that includes the security extensions but does
5632      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
5633      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
5634      * bits respectively.
5635      *
5636      * In a V8 implementation, it is permitted for privileged software to
5637      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
5638      */
5639     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
5640         arm_feature(env, ARM_FEATURE_EL3) &&
5641         !arm_feature(env, ARM_FEATURE_EL2) &&
5642         !arm_is_secure(env)) {
5643 
5644         changed_daif = (env->daif ^ val) & mask;
5645 
5646         if (changed_daif & CPSR_A) {
5647             /* Check to see if we are allowed to change the masking of async
5648              * abort exceptions from a non-secure state.
5649              */
5650             if (!(env->cp15.scr_el3 & SCR_AW)) {
5651                 qemu_log_mask(LOG_GUEST_ERROR,
5652                               "Ignoring attempt to switch CPSR_A flag from "
5653                               "non-secure world with SCR.AW bit clear\n");
5654                 mask &= ~CPSR_A;
5655             }
5656         }
5657 
5658         if (changed_daif & CPSR_F) {
5659             /* Check to see if we are allowed to change the masking of FIQ
5660              * exceptions from a non-secure state.
5661              */
5662             if (!(env->cp15.scr_el3 & SCR_FW)) {
5663                 qemu_log_mask(LOG_GUEST_ERROR,
5664                               "Ignoring attempt to switch CPSR_F flag from "
5665                               "non-secure world with SCR.FW bit clear\n");
5666                 mask &= ~CPSR_F;
5667             }
5668 
5669             /* Check whether non-maskable FIQ (NMFI) support is enabled.
5670              * If this bit is set software is not allowed to mask
5671              * FIQs, but is allowed to set CPSR_F to 0.
5672              */
5673             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
5674                 (val & CPSR_F)) {
5675                 qemu_log_mask(LOG_GUEST_ERROR,
5676                               "Ignoring attempt to enable CPSR_F flag "
5677                               "(non-maskable FIQ [NMFI] support enabled)\n");
5678                 mask &= ~CPSR_F;
5679             }
5680         }
5681     }
5682 
5683     env->daif &= ~(CPSR_AIF & mask);
5684     env->daif |= val & CPSR_AIF & mask;
5685 
5686     if (write_type != CPSRWriteRaw &&
5687         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
5688         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
5689             /* Note that we can only get here in USR mode if this is a
5690              * gdb stub write; for this case we follow the architectural
5691              * behaviour for guest writes in USR mode of ignoring an attempt
5692              * to switch mode. (Those are caught by translate.c for writes
5693              * triggered by guest instructions.)
5694              */
5695             mask &= ~CPSR_M;
5696         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
5697             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
5698              * v7, and has defined behaviour in v8:
5699              *  + leave CPSR.M untouched
5700              *  + allow changes to the other CPSR fields
5701              *  + set PSTATE.IL
5702              * For user changes via the GDB stub, we don't set PSTATE.IL,
5703              * as this would be unnecessarily harsh for a user error.
5704              */
5705             mask &= ~CPSR_M;
5706             if (write_type != CPSRWriteByGDBStub &&
5707                 arm_feature(env, ARM_FEATURE_V8)) {
5708                 mask |= CPSR_IL;
5709                 val |= CPSR_IL;
5710             }
5711         } else {
5712             switch_mode(env, val & CPSR_M);
5713         }
5714     }
5715     mask &= ~CACHED_CPSR_BITS;
5716     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
5717 }
5718 
5719 /* Sign/zero extend */
5720 uint32_t HELPER(sxtb16)(uint32_t x)
5721 {
5722     uint32_t res;
5723     res = (uint16_t)(int8_t)x;
5724     res |= (uint32_t)(int8_t)(x >> 16) << 16;
5725     return res;
5726 }
5727 
5728 uint32_t HELPER(uxtb16)(uint32_t x)
5729 {
5730     uint32_t res;
5731     res = (uint16_t)(uint8_t)x;
5732     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
5733     return res;
5734 }
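/* Worked examples: sxtb16(0x00810082) sign-extends bytes 0 and 2 to give
 * 0xff81ff82, while uxtb16(0x12345678) zero-extends them to 0x00340078.
 */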
5735 
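/* The AArch32 SDIV/UDIV instructions define the corner cases that C leaves
 * undefined: division by zero returns 0, and the INT_MIN / -1 overflow
 * case returns INT_MIN.
 */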
5736 int32_t HELPER(sdiv)(int32_t num, int32_t den)
5737 {
5738     if (den == 0)
5739       return 0;
5740     if (num == INT_MIN && den == -1)
5741       return INT_MIN;
5742     return num / den;
5743 }
5744 
5745 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
5746 {
5747     if (den == 0)
5748       return 0;
5749     return num / den;
5750 }
5751 
5752 uint32_t HELPER(rbit)(uint32_t x)
5753 {
5754     return revbit32(x);
5755 }
5756 
5757 #if defined(CONFIG_USER_ONLY)
5758 
5759 /* These should probably raise undefined insn exceptions.  */
5760 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
5761 {
5762     ARMCPU *cpu = arm_env_get_cpu(env);
5763 
5764     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
5765 }
5766 
5767 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
5768 {
5769     ARMCPU *cpu = arm_env_get_cpu(env);
5770 
5771     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
5772     return 0;
5773 }
5774 
5775 void switch_mode(CPUARMState *env, int mode)
5776 {
5777     ARMCPU *cpu = arm_env_get_cpu(env);
5778 
5779     if (mode != ARM_CPU_MODE_USR) {
5780         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
5781     }
5782 }
5783 
5784 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5785                                  uint32_t cur_el, bool secure)
5786 {
5787     return 1;
5788 }
5789 
5790 void aarch64_sync_64_to_32(CPUARMState *env)
5791 {
5792     g_assert_not_reached();
5793 }
5794 
5795 #else
5796 
5797 void switch_mode(CPUARMState *env, int mode)
5798 {
5799     int old_mode;
5800     int i;
5801 
5802     old_mode = env->uncached_cpsr & CPSR_M;
5803     if (mode == old_mode)
5804         return;
5805 
5806     if (old_mode == ARM_CPU_MODE_FIQ) {
5807         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
5808         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
5809     } else if (mode == ARM_CPU_MODE_FIQ) {
5810         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
5811         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
5812     }
5813 
5814     i = bank_number(old_mode);
5815     env->banked_r13[i] = env->regs[13];
5816     env->banked_r14[i] = env->regs[14];
5817     env->banked_spsr[i] = env->spsr;
5818 
5819     i = bank_number(mode);
5820     env->regs[13] = env->banked_r13[i];
5821     env->regs[14] = env->banked_r14[i];
5822     env->spsr = env->banked_spsr[i];
5823 }
5824 
5825 /* Physical Interrupt Target EL Lookup Table
5826  *
5827  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
5828  *
5829  * The below multi-dimensional table is used for looking up the target
5830  * exception level given numerous condition criteria.  Specifically, the
5831  * target EL is based on SCR and HCR routing controls as well as the
5832  * currently executing EL and secure state.
5833  *
5834  *    Dimensions:
5835  *    target_el_table[2][2][2][2][2][4]
5836  *                    |  |  |  |  |  +--- Current EL
5837  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
5838  *                    |  |  |  +--------- HCR mask override
5839  *                    |  |  +------------ SCR exec state control
5840  *                    |  +--------------- SCR mask override
5841  *                    +------------------ 32-bit(0)/64-bit(1) EL3
5842  *
5843  *    The table values are as such:
5844  *    0-3 = EL0-EL3
5845  *     -1 = Cannot occur
5846  *
5847  * The ARM ARM target EL table includes entries indicating that an "exception
5848  * is not taken".  The two cases where this is applicable are:
5849  *    1) An exception is taken from EL3 but the SCR does not have the exception
5850  *    routed to EL3.
5851  *    2) An exception is taken from EL2 but the HCR does not have the exception
5852  *    routed to EL2.
5853  * In these two cases, the below table contains a target of EL1.  This value is
5854  * returned as it is expected that the consumer of the table data will check
5855  * for "target EL >= current EL" to ensure the exception is not taken.
5856  *
5857  *            SCR     HCR
5858  *         64  EA     AMO                 From
5859  *        BIT IRQ     IMO      Non-secure         Secure
5860  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
5861  */
5862 static const int8_t target_el_table[2][2][2][2][2][4] = {
5863     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5864        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
5865       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5866        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
5867      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5868        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
5869       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5870        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
5871     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
5872        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
5873       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
5874        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
5875      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5876        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
5877       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5878        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
5879 };
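/* Example lookup using the table above: with a 64-bit EL3 (is64 = 1),
 * SCR.IRQ = 0, SCR.RW = 1 and HCR.IMO = 1, a physical IRQ taken from
 * non-secure EL0 is routed to EL2:
 *   target_el_table[1][0][1][1][0][0] == 2
 */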
5880 
5881 /*
5882  * Determine the target EL for physical exceptions
5883  */
5884 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5885                                  uint32_t cur_el, bool secure)
5886 {
5887     CPUARMState *env = cs->env_ptr;
5888     int rw;
5889     int scr;
5890     int hcr;
5891     int target_el;
5892     /* Is the highest EL AArch64? */
5893     int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
5894 
5895     if (arm_feature(env, ARM_FEATURE_EL3)) {
5896         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
5897     } else {
5898         /* Either EL2 is the highest EL (and so the EL2 register width
5899          * is given by is64); or there is no EL2 or EL3, in which case
5900          * the value of 'rw' does not affect the table lookup anyway.
5901          */
5902         rw = is64;
5903     }
5904 
5905     switch (excp_idx) {
5906     case EXCP_IRQ:
5907         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
5908         hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
5909         break;
5910     case EXCP_FIQ:
5911         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
5912         hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
5913         break;
5914     default:
5915         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
5916         hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
5917         break;
5918     }
5919 
5920     /* If HCR.TGE is set then HCR is treated as being 1 */
5921     hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
5922 
5923     /* Perform a table-lookup for the target EL given the current state */
5924     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
5925 
5926     assert(target_el > 0);
5927 
5928     return target_el;
5929 }
5930 
5931 static void v7m_push(CPUARMState *env, uint32_t val)
5932 {
5933     CPUState *cs = CPU(arm_env_get_cpu(env));
5934 
5935     env->regs[13] -= 4;
5936     stl_phys(cs->as, env->regs[13], val);
5937 }
5938 
5939 static uint32_t v7m_pop(CPUARMState *env)
5940 {
5941     CPUState *cs = CPU(arm_env_get_cpu(env));
5942     uint32_t val;
5943 
5944     val = ldl_phys(cs->as, env->regs[13]);
5945     env->regs[13] += 4;
5946     return val;
5947 }
5948 
5949 /* Switch to V7M main or process stack pointer.  */
5950 static void switch_v7m_sp(CPUARMState *env, int process)
5951 {
5952     uint32_t tmp;
5953     if (env->v7m.current_sp != process) {
5954         tmp = env->v7m.other_sp;
5955         env->v7m.other_sp = env->regs[13];
5956         env->regs[13] = tmp;
5957         env->v7m.current_sp = process;
5958     }
5959 }
5960 
5961 static void do_v7m_exception_exit(CPUARMState *env)
5962 {
5963     uint32_t type;
5964     uint32_t xpsr;
5965 
5966     type = env->regs[15];
5967     if (env->v7m.exception != 0)
5968         armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
5969 
5970     /* Switch to the target stack.  */
5971     switch_v7m_sp(env, (type & 4) != 0);
5972     /* Pop registers.  */
5973     env->regs[0] = v7m_pop(env);
5974     env->regs[1] = v7m_pop(env);
5975     env->regs[2] = v7m_pop(env);
5976     env->regs[3] = v7m_pop(env);
5977     env->regs[12] = v7m_pop(env);
5978     env->regs[14] = v7m_pop(env);
5979     env->regs[15] = v7m_pop(env);
5980     if (env->regs[15] & 1) {
5981         qemu_log_mask(LOG_GUEST_ERROR,
5982                       "M profile return from interrupt with misaligned "
5983                       "PC is UNPREDICTABLE\n");
5984         /* Actual hardware seems to ignore the lsbit, and there are several
5985          * RTOSes out there which incorrectly assume the r15 in the stack
5986          * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
5987          */
5988         env->regs[15] &= ~1U;
5989     }
5990     xpsr = v7m_pop(env);
5991     xpsr_write(env, xpsr, 0xfffffdff);
5992     /* Undo stack alignment.  */
5993     if (xpsr & 0x200)
5994         env->regs[13] |= 4;
5995     /* ??? The exception return type specifies Thread/Handler mode.  However
5996        this is also implied by the xPSR value. Not sure what to do
5997        if there is a mismatch.  */
5998     /* ??? Likewise for mismatches between the CONTROL register and the stack
5999        pointer.  */
6000 }
6001 
6002 static void arm_log_exception(int idx)
6003 {
6004     if (qemu_loglevel_mask(CPU_LOG_INT)) {
6005         const char *exc = NULL;
6006 
6007         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
6008             exc = excnames[idx];
6009         }
6010         if (!exc) {
6011             exc = "unknown";
6012         }
6013         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
6014     }
6015 }
6016 
6017 void arm_v7m_cpu_do_interrupt(CPUState *cs)
6018 {
6019     ARMCPU *cpu = ARM_CPU(cs);
6020     CPUARMState *env = &cpu->env;
6021     uint32_t xpsr = xpsr_read(env);
6022     uint32_t lr;
6023     uint32_t addr;
6024 
6025     arm_log_exception(cs->exception_index);
6026 
6027     lr = 0xfffffff1;
6028     if (env->v7m.current_sp)
6029         lr |= 4;
6030     if (env->v7m.exception == 0)
6031         lr |= 8;
6032 
6033     /* For exceptions we just mark as pending on the NVIC, and let that
6034        handle it.  */
6035     /* TODO: Need to escalate if the current priority is higher than the
6036        one we're raising.  */
6037     switch (cs->exception_index) {
6038     case EXCP_UDEF:
6039         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6040         return;
6041     case EXCP_SWI:
6042         /* The PC already points to the next instruction.  */
6043         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
6044         return;
6045     case EXCP_PREFETCH_ABORT:
6046     case EXCP_DATA_ABORT:
6047         /* TODO: if we implemented the MPU registers, this is where we
6048          * should set the MMFAR, etc from exception.fsr and exception.vaddress.
6049          */
6050         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
6051         return;
6052     case EXCP_BKPT:
6053         if (semihosting_enabled()) {
6054             int nr;
6055             nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
6056             if (nr == 0xab) {
6057                 env->regs[15] += 2;
6058                 qemu_log_mask(CPU_LOG_INT,
6059                               "...handling as semihosting call 0x%x\n",
6060                               env->regs[0]);
6061                 env->regs[0] = do_arm_semihosting(env);
6062                 return;
6063             }
6064         }
6065         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
6066         return;
6067     case EXCP_IRQ:
6068         env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
6069         break;
6070     case EXCP_EXCEPTION_EXIT:
6071         do_v7m_exception_exit(env);
6072         return;
6073     default:
6074         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6075         return; /* Never happens.  Keep compiler happy.  */
6076     }
6077 
6078     /* Align stack pointer.  */
6079     /* ??? Should only do this if Configuration Control Register
6080        STACKALIGN bit is set.  */
6081     if (env->regs[13] & 4) {
6082         env->regs[13] -= 4;
6083         xpsr |= 0x200;
6084     }
6085     /* Switch to the handler mode.  */
6086     v7m_push(env, xpsr);
6087     v7m_push(env, env->regs[15]);
6088     v7m_push(env, env->regs[14]);
6089     v7m_push(env, env->regs[12]);
6090     v7m_push(env, env->regs[3]);
6091     v7m_push(env, env->regs[2]);
6092     v7m_push(env, env->regs[1]);
6093     v7m_push(env, env->regs[0]);
6094     switch_v7m_sp(env, 0);
6095     /* Clear IT bits */
6096     env->condexec_bits = 0;
6097     env->regs[14] = lr;
6098     addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
6099     env->regs[15] = addr & 0xfffffffe;
6100     env->thumb = addr & 1;
6101 }
6102 
6103 /* Function used to synchronize QEMU's AArch64 register set with AArch32
6104  * register set.  This is necessary when switching between AArch32 and AArch64
6105  * execution state.
6106  */
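/* The mapping implemented below pairs each AArch64 X register with the
 * AArch32 register or banked register holding the same architectural
 * state: x0-x7 <-> r0-r7, x8-x12 <-> usr r8-r12, x13/x14 <-> usr/sys
 * SP/LR, x15 <-> SP_hyp, x16/x17 <-> LR_irq/SP_irq, x18/x19 <->
 * LR_svc/SP_svc, x20/x21 <-> LR_abt/SP_abt, x22/x23 <-> LR_und/SP_und,
 * x24-x30 <-> r8_fiq-r14_fiq.
 */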
6107 void aarch64_sync_32_to_64(CPUARMState *env)
6108 {
6109     int i;
6110     uint32_t mode = env->uncached_cpsr & CPSR_M;
6111 
6112     /* We can blanket copy R[0:7] to X[0:7] */
6113     for (i = 0; i < 8; i++) {
6114         env->xregs[i] = env->regs[i];
6115     }
6116 
6117     /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
6118      * Otherwise, they come from the banked user regs.
6119      */
6120     if (mode == ARM_CPU_MODE_FIQ) {
6121         for (i = 8; i < 13; i++) {
6122             env->xregs[i] = env->usr_regs[i - 8];
6123         }
6124     } else {
6125         for (i = 8; i < 13; i++) {
6126             env->xregs[i] = env->regs[i];
6127         }
6128     }
6129 
6130     /* Registers x13-x23 are the various mode SP and LR registers. Registers
6131      * r13 and r14 are only copied if we are in that mode, otherwise we copy
6132      * from the mode banked register.
6133      */
6134     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6135         env->xregs[13] = env->regs[13];
6136         env->xregs[14] = env->regs[14];
6137     } else {
6138         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
6139         /* HYP is an exception in that it shares its r14 with USR */
6140         if (mode == ARM_CPU_MODE_HYP) {
6141             env->xregs[14] = env->regs[14];
6142         } else {
6143             env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
6144         }
6145     }
6146 
6147     if (mode == ARM_CPU_MODE_HYP) {
6148         env->xregs[15] = env->regs[13];
6149     } else {
6150         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
6151     }
6152 
6153     if (mode == ARM_CPU_MODE_IRQ) {
6154         env->xregs[16] = env->regs[14];
6155         env->xregs[17] = env->regs[13];
6156     } else {
6157         env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
6158         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
6159     }
6160 
6161     if (mode == ARM_CPU_MODE_SVC) {
6162         env->xregs[18] = env->regs[14];
6163         env->xregs[19] = env->regs[13];
6164     } else {
6165         env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
6166         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
6167     }
6168 
6169     if (mode == ARM_CPU_MODE_ABT) {
6170         env->xregs[20] = env->regs[14];
6171         env->xregs[21] = env->regs[13];
6172     } else {
6173         env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
6174         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
6175     }
6176 
6177     if (mode == ARM_CPU_MODE_UND) {
6178         env->xregs[22] = env->regs[14];
6179         env->xregs[23] = env->regs[13];
6180     } else {
6181         env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
6182         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
6183     }
6184 
6185     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6186      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
6187      * FIQ bank for r8-r14.
6188      */
6189     if (mode == ARM_CPU_MODE_FIQ) {
6190         for (i = 24; i < 31; i++) {
6191             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
6192         }
6193     } else {
6194         for (i = 24; i < 29; i++) {
6195             env->xregs[i] = env->fiq_regs[i - 24];
6196         }
6197         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
6198         env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
6199     }
6200 
6201     env->pc = env->regs[15];
6202 }
6203 
6204 /* Function used to synchronize QEMU's AArch32 register set with AArch64
6205  * register set.  This is necessary when switching between AArch32 and AArch64
6206  * execution state.
6207  */
6208 void aarch64_sync_64_to_32(CPUARMState *env)
6209 {
6210     int i;
6211     uint32_t mode = env->uncached_cpsr & CPSR_M;
6212 
6213     /* We can blanket copy X[0:7] to R[0:7] */
6214     for (i = 0; i < 8; i++) {
6215         env->regs[i] = env->xregs[i];
6216     }
6217 
6218     /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
6219      * Otherwise, we copy x8-x12 into the banked user regs.
6220      */
6221     if (mode == ARM_CPU_MODE_FIQ) {
6222         for (i = 8; i < 13; i++) {
6223             env->usr_regs[i - 8] = env->xregs[i];
6224         }
6225     } else {
6226         for (i = 8; i < 13; i++) {
6227             env->regs[i] = env->xregs[i];
6228         }
6229     }
6230 
6231     /* Registers r13 & r14 depend on the current mode.
6232      * If we are in a given mode, we copy the corresponding x registers to r13
6233      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
6234      * for the mode.
6235      */
6236     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6237         env->regs[13] = env->xregs[13];
6238         env->regs[14] = env->xregs[14];
6239     } else {
6240         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
6241 
6242         /* HYP is an exception in that it does not have its own banked r14 but
6243          * shares the USR r14
6244          */
6245         if (mode == ARM_CPU_MODE_HYP) {
6246             env->regs[14] = env->xregs[14];
6247         } else {
6248             env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
6249         }
6250     }
6251 
6252     if (mode == ARM_CPU_MODE_HYP) {
6253         env->regs[13] = env->xregs[15];
6254     } else {
6255         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
6256     }
6257 
6258     if (mode == ARM_CPU_MODE_IRQ) {
6259         env->regs[14] = env->xregs[16];
6260         env->regs[13] = env->xregs[17];
6261     } else {
6262         env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
6263         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
6264     }
6265 
6266     if (mode == ARM_CPU_MODE_SVC) {
6267         env->regs[14] = env->xregs[18];
6268         env->regs[13] = env->xregs[19];
6269     } else {
6270         env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
6271         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
6272     }
6273 
6274     if (mode == ARM_CPU_MODE_ABT) {
6275         env->regs[14] = env->xregs[20];
6276         env->regs[13] = env->xregs[21];
6277     } else {
6278         env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
6279         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
6280     }
6281 
6282     if (mode == ARM_CPU_MODE_UND) {
6283         env->regs[14] = env->xregs[22];
6284         env->regs[13] = env->xregs[23];
6285     } else {
6286         env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
6287         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
6288     }
6289 
6290     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6291      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
6292      * FIQ bank for r8-r14.
6293      */
6294     if (mode == ARM_CPU_MODE_FIQ) {
6295         for (i = 24; i < 31; i++) {
6296             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
6297         }
6298     } else {
6299         for (i = 24; i < 29; i++) {
6300             env->fiq_regs[i - 24] = env->xregs[i];
6301         }
6302         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
6303         env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
6304     }
6305 
6306     env->regs[15] = env->pc;
6307 }
6308 
6309 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
6310 {
6311     ARMCPU *cpu = ARM_CPU(cs);
6312     CPUARMState *env = &cpu->env;
6313     uint32_t addr;
6314     uint32_t mask;
6315     int new_mode;
6316     uint32_t offset;
6317     uint32_t moe;
6318 
6319     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
6320     switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
6321     case EC_BREAKPOINT:
6322     case EC_BREAKPOINT_SAME_EL:
6323         moe = 1;
6324         break;
6325     case EC_WATCHPOINT:
6326     case EC_WATCHPOINT_SAME_EL:
6327         moe = 10;
6328         break;
6329     case EC_AA32_BKPT:
6330         moe = 3;
6331         break;
6332     case EC_VECTORCATCH:
6333         moe = 5;
6334         break;
6335     default:
6336         moe = 0;
6337         break;
6338     }
6339 
6340     if (moe) {
6341         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
6342     }
6343 
6344     /* TODO: Vectored interrupt controller.  */
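    /* The offsets assigned below follow the architected AArch32 vector
     * table layout: 0x00 reset, 0x04 undefined instruction, 0x08
     * supervisor call (also SMC in the monitor vectors), 0x0c prefetch
     * abort, 0x10 data abort, 0x18 IRQ, 0x1c FIQ.
     */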
6345     switch (cs->exception_index) {
6346     case EXCP_UDEF:
6347         new_mode = ARM_CPU_MODE_UND;
6348         addr = 0x04;
6349         mask = CPSR_I;
6350         if (env->thumb)
6351             offset = 2;
6352         else
6353             offset = 4;
6354         break;
6355     case EXCP_SWI:
6356         new_mode = ARM_CPU_MODE_SVC;
6357         addr = 0x08;
6358         mask = CPSR_I;
6359         /* The PC already points to the next instruction.  */
6360         offset = 0;
6361         break;
6362     case EXCP_BKPT:
6363         env->exception.fsr = 2;
6364         /* Fall through to prefetch abort.  */
6365     case EXCP_PREFETCH_ABORT:
6366         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
6367         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
6368         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
6369                       env->exception.fsr, (uint32_t)env->exception.vaddress);
6370         new_mode = ARM_CPU_MODE_ABT;
6371         addr = 0x0c;
6372         mask = CPSR_A | CPSR_I;
6373         offset = 4;
6374         break;
6375     case EXCP_DATA_ABORT:
6376         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
6377         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
6378         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
6379                       env->exception.fsr,
6380                       (uint32_t)env->exception.vaddress);
6381         new_mode = ARM_CPU_MODE_ABT;
6382         addr = 0x10;
6383         mask = CPSR_A | CPSR_I;
6384         offset = 8;
6385         break;
6386     case EXCP_IRQ:
6387         new_mode = ARM_CPU_MODE_IRQ;
6388         addr = 0x18;
6389         /* Disable IRQ and imprecise data aborts.  */
6390         mask = CPSR_A | CPSR_I;
6391         offset = 4;
6392         if (env->cp15.scr_el3 & SCR_IRQ) {
6393             /* IRQ routed to monitor mode */
6394             new_mode = ARM_CPU_MODE_MON;
6395             mask |= CPSR_F;
6396         }
6397         break;
6398     case EXCP_FIQ:
6399         new_mode = ARM_CPU_MODE_FIQ;
6400         addr = 0x1c;
6401         /* Disable FIQ, IRQ and imprecise data aborts.  */
6402         mask = CPSR_A | CPSR_I | CPSR_F;
6403         if (env->cp15.scr_el3 & SCR_FIQ) {
6404             /* FIQ routed to monitor mode */
6405             new_mode = ARM_CPU_MODE_MON;
6406         }
6407         offset = 4;
6408         break;
6409     case EXCP_VIRQ:
6410         new_mode = ARM_CPU_MODE_IRQ;
6411         addr = 0x18;
6412         /* Disable IRQ and imprecise data aborts.  */
6413         mask = CPSR_A | CPSR_I;
6414         offset = 4;
6415         break;
6416     case EXCP_VFIQ:
6417         new_mode = ARM_CPU_MODE_FIQ;
6418         addr = 0x1c;
6419         /* Disable FIQ, IRQ and imprecise data aborts.  */
6420         mask = CPSR_A | CPSR_I | CPSR_F;
6421         offset = 4;
6422         break;
6423     case EXCP_SMC:
6424         new_mode = ARM_CPU_MODE_MON;
6425         addr = 0x08;
6426         mask = CPSR_A | CPSR_I | CPSR_F;
6427         offset = 0;
6428         break;
6429     default:
6430         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6431         return; /* Never happens.  Keep compiler happy.  */
6432     }
6433 
6434     if (new_mode == ARM_CPU_MODE_MON) {
6435         addr += env->cp15.mvbar;
6436     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
6437         /* High vectors. When enabled, base address cannot be remapped. */
6438         addr += 0xffff0000;
6439     } else {
6440         /* ARM v7 architectures provide a vector base address register to remap
6441          * the interrupt vector table.
6442          * This register is only honoured in non-monitor mode, and is banked.
6443          * Note: only bits 31:5 are valid.
6444          */
6445         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
6446     }
6447 
6448     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
6449         env->cp15.scr_el3 &= ~SCR_NS;
6450     }
6451 
6452     switch_mode(env, new_mode);
6453     /* For exceptions taken to AArch32 we must clear the SS bit in both
6454      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
6455      */
6456     env->uncached_cpsr &= ~PSTATE_SS;
6457     env->spsr = cpsr_read(env);
6458     /* Clear IT bits.  */
6459     env->condexec_bits = 0;
6460     /* Switch to the new mode, and to the correct instruction set.  */
6461     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
6462     /* Set new mode endianness */
6463     env->uncached_cpsr &= ~CPSR_E;
6464     if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
6465         env->uncached_cpsr |= CPSR_E;
6466     }
6467     env->daif |= mask;
6468     /* This is a lie, as there was no c1_sys on V4T/V5, but who cares:
6469      * we should just guard the thumb mode on V4. */
6470     if (arm_feature(env, ARM_FEATURE_V4T)) {
6471         env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
6472     }
6473     env->regs[14] = env->regs[15] + offset;
6474     env->regs[15] = addr;
6475 }
6476 
6477 /* Handle exception entry to a target EL which is using AArch64 */
6478 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
6479 {
6480     ARMCPU *cpu = ARM_CPU(cs);
6481     CPUARMState *env = &cpu->env;
6482     unsigned int new_el = env->exception.target_el;
6483     target_ulong addr = env->cp15.vbar_el[new_el];
6484     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
6485 
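    /* The architected AArch64 vector table has four groups of entries
     * 0x200 apart: current EL with SP_EL0 at +0x0, current EL with SP_ELx
     * at +0x200, lower EL using AArch64 at +0x400, lower EL using AArch32
     * at +0x600; within each group synchronous exceptions are at +0x0,
     * IRQ at +0x80, FIQ at +0x100 and SError at +0x180.
     */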
6486     if (arm_current_el(env) < new_el) {
6487         /* Entry vector offset depends on whether the implemented EL
6488          * immediately lower than the target level is using AArch32 or AArch64
6489          */
6490         bool is_aa64;
6491 
6492         switch (new_el) {
6493         case 3:
6494             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
6495             break;
6496         case 2:
6497             is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
6498             break;
6499         case 1:
6500             is_aa64 = is_a64(env);
6501             break;
6502         default:
6503             g_assert_not_reached();
6504         }
6505 
6506         if (is_aa64) {
6507             addr += 0x400;
6508         } else {
6509             addr += 0x600;
6510         }
6511     } else if (pstate_read(env) & PSTATE_SP) {
6512         addr += 0x200;
6513     }
6514 
6515     switch (cs->exception_index) {
6516     case EXCP_PREFETCH_ABORT:
6517     case EXCP_DATA_ABORT:
6518         env->cp15.far_el[new_el] = env->exception.vaddress;
6519         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
6520                       env->cp15.far_el[new_el]);
6521         /* fall through */
6522     case EXCP_BKPT:
6523     case EXCP_UDEF:
6524     case EXCP_SWI:
6525     case EXCP_HVC:
6526     case EXCP_HYP_TRAP:
6527     case EXCP_SMC:
6528         env->cp15.esr_el[new_el] = env->exception.syndrome;
6529         break;
6530     case EXCP_IRQ:
6531     case EXCP_VIRQ:
6532         addr += 0x80;
6533         break;
6534     case EXCP_FIQ:
6535     case EXCP_VFIQ:
6536         addr += 0x100;
6537         break;
6538     case EXCP_SEMIHOST:
6539         qemu_log_mask(CPU_LOG_INT,
6540                       "...handling as semihosting call 0x%" PRIx64 "\n",
6541                       env->xregs[0]);
6542         env->xregs[0] = do_arm_semihosting(env);
6543         return;
6544     default:
6545         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6546     }
6547 
6548     if (is_a64(env)) {
6549         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
6550         aarch64_save_sp(env, arm_current_el(env));
6551         env->elr_el[new_el] = env->pc;
6552     } else {
6553         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
6554         env->elr_el[new_el] = env->regs[15];
6555 
6556         aarch64_sync_32_to_64(env);
6557 
6558         env->condexec_bits = 0;
6559     }
6560     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
6561                   env->elr_el[new_el]);
6562 
6563     pstate_write(env, PSTATE_DAIF | new_mode);
6564     env->aarch64 = 1;
6565     aarch64_restore_sp(env, new_el);
6566 
6567     env->pc = addr;
6568 
6569     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
6570                   new_el, env->pc, pstate_read(env));
6571 }
6572 
6573 static inline bool check_for_semihosting(CPUState *cs)
6574 {
6575     /* Check whether this exception is a semihosting call; if so
6576      * then handle it and return true; otherwise return false.
6577      */
6578     ARMCPU *cpu = ARM_CPU(cs);
6579     CPUARMState *env = &cpu->env;
6580 
6581     if (is_a64(env)) {
6582         if (cs->exception_index == EXCP_SEMIHOST) {
6583             /* This is always the 64-bit semihosting exception.
6584              * The "is this usermode" and "is semihosting enabled"
6585              * checks have been done at translate time.
6586              */
6587             qemu_log_mask(CPU_LOG_INT,
6588                           "...handling as semihosting call 0x%" PRIx64 "\n",
6589                           env->xregs[0]);
6590             env->xregs[0] = do_arm_semihosting(env);
6591             return true;
6592         }
6593         return false;
6594     } else {
6595         uint32_t imm;
6596 
6597         /* Only intercept calls from privileged modes, to provide some
6598          * semblance of security.
6599          */
6600         if (cs->exception_index != EXCP_SEMIHOST &&
6601             (!semihosting_enabled() ||
6602              ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
6603             return false;
6604         }
6605 
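             /* The recognised AArch32 semihosting traps are SVC 0x123456
              * in ARM state and SVC 0xAB or BKPT 0xAB in Thumb state;
              * anything else falls through as a normal exception.
              */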
6606         switch (cs->exception_index) {
6607         case EXCP_SEMIHOST:
6608             /* This is always a semihosting call; the "is this usermode"
6609              * and "is semihosting enabled" checks have been done at
6610              * translate time.
6611              */
6612             break;
6613         case EXCP_SWI:
6614             /* Check for semihosting interrupt.  */
6615             if (env->thumb) {
6616                 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
6617                     & 0xff;
6618                 if (imm == 0xab) {
6619                     break;
6620                 }
6621             } else {
6622                 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
6623                     & 0xffffff;
6624                 if (imm == 0x123456) {
6625                     break;
6626                 }
6627             }
6628             return false;
6629         case EXCP_BKPT:
6630             /* See if this is a semihosting syscall.  */
6631             if (env->thumb) {
6632                 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
6633                     & 0xff;
6634                 if (imm == 0xab) {
6635                     env->regs[15] += 2;
6636                     break;
6637                 }
6638             }
6639             return false;
6640         default:
6641             return false;
6642         }
6643 
6644         qemu_log_mask(CPU_LOG_INT,
6645                       "...handling as semihosting call 0x%x\n",
6646                       env->regs[0]);
6647         env->regs[0] = do_arm_semihosting(env);
6648         return true;
6649     }
6650 }
6651 
6652 /* Handle a CPU exception for A and R profile CPUs.
6653  * Do any appropriate logging, handle PSCI calls, and then hand off
6654  * to the AArch64-entry or AArch32-entry function depending on the
6655  * target exception level's register width.
6656  */
6657 void arm_cpu_do_interrupt(CPUState *cs)
6658 {
6659     ARMCPU *cpu = ARM_CPU(cs);
6660     CPUARMState *env = &cpu->env;
6661     unsigned int new_el = env->exception.target_el;
6662 
6663     assert(!IS_M(env));
6664 
6665     arm_log_exception(cs->exception_index);
6666     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
6667                   new_el);
6668     if (qemu_loglevel_mask(CPU_LOG_INT)
6669         && !excp_is_internal(cs->exception_index)) {
6670         qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
6671                       env->exception.syndrome >> ARM_EL_EC_SHIFT,
6672                       env->exception.syndrome);
6673     }
6674 
6675     if (arm_is_psci_call(cpu, cs->exception_index)) {
6676         arm_handle_psci_call(cpu);
6677         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
6678         return;
6679     }
6680 
6681     /* Semihosting semantics depend on the register width of the
6682      * code that caused the exception, not the target exception level,
6683      * so it must be handled here.
6684      */
6685     if (check_for_semihosting(cs)) {
6686         return;
6687     }
6688 
6689     assert(!excp_is_internal(cs->exception_index));
6690     if (arm_el_is_aa64(env, new_el)) {
6691         arm_cpu_do_interrupt_aarch64(cs);
6692     } else {
6693         arm_cpu_do_interrupt_aarch32(cs);
6694     }
6695 
6696     arm_call_el_change_hook(cpu);
6697 
6698     if (!kvm_enabled()) {
6699         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
6700     }
6701 }
6702 
6703 /* Return the exception level which controls this address translation regime */
6704 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
6705 {
6706     switch (mmu_idx) {
6707     case ARMMMUIdx_S2NS:
6708     case ARMMMUIdx_S1E2:
6709         return 2;
6710     case ARMMMUIdx_S1E3:
6711         return 3;
6712     case ARMMMUIdx_S1SE0:
6713         return arm_el_is_aa64(env, 3) ? 1 : 3;
6714     case ARMMMUIdx_S1SE1:
6715     case ARMMMUIdx_S1NSE0:
6716     case ARMMMUIdx_S1NSE1:
6717         return 1;
6718     default:
6719         g_assert_not_reached();
6720     }
6721 }
6722 
6723 /* Return true if this address translation regime is secure */
6724 static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
6725 {
6726     switch (mmu_idx) {
6727     case ARMMMUIdx_S12NSE0:
6728     case ARMMMUIdx_S12NSE1:
6729     case ARMMMUIdx_S1NSE0:
6730     case ARMMMUIdx_S1NSE1:
6731     case ARMMMUIdx_S1E2:
6732     case ARMMMUIdx_S2NS:
6733         return false;
6734     case ARMMMUIdx_S1E3:
6735     case ARMMMUIdx_S1SE0:
6736     case ARMMMUIdx_S1SE1:
6737         return true;
6738     default:
6739         g_assert_not_reached();
6740     }
6741 }
6742 
6743 /* Return the SCTLR value which controls this address translation regime */
6744 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
6745 {
6746     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
6747 }
6748 
6749 /* Return true if the specified stage of address translation is disabled */
6750 static inline bool regime_translation_disabled(CPUARMState *env,
6751                                                ARMMMUIdx mmu_idx)
6752 {
6753     if (mmu_idx == ARMMMUIdx_S2NS) {
6754         return (env->cp15.hcr_el2 & HCR_VM) == 0;
6755     }
6756     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
6757 }
6758 
6759 static inline bool regime_translation_big_endian(CPUARMState *env,
6760                                                  ARMMMUIdx mmu_idx)
6761 {
6762     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
6763 }
6764 
6765 /* Return the TCR controlling this translation regime */
6766 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
6767 {
6768     if (mmu_idx == ARMMMUIdx_S2NS) {
6769         return &env->cp15.vtcr_el2;
6770     }
6771     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
6772 }
6773 
6774 /* Returns TBI0 value for current regime el */
6775 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
6776 {
6777     TCR *tcr;
6778     uint32_t el;
6779 
6780     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
6781      * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
6782      */
6783     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
6784         mmu_idx += ARMMMUIdx_S1NSE0;
6785     }
6786 
6787     tcr = regime_tcr(env, mmu_idx);
6788     el = regime_el(env, mmu_idx);
6789 
6790     if (el > 1) {
6791         return extract64(tcr->raw_tcr, 20, 1);
6792     } else {
6793         return extract64(tcr->raw_tcr, 37, 1);
6794     }
6795 }
6796 
6797 /* Returns TBI1 value for current regime el */
6798 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
6799 {
6800     TCR *tcr;
6801     uint32_t el;
6802 
6803     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
6804      * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
6805      */
6806     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
6807         mmu_idx += ARMMMUIdx_S1NSE0;
6808     }
6809 
6810     tcr = regime_tcr(env, mmu_idx);
6811     el = regime_el(env, mmu_idx);
6812 
6813     if (el > 1) {
6814         return 0;
6815     } else {
6816         return extract64(tcr->raw_tcr, 38, 1);
6817     }
6818 }
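
     /* For example, with TBI0 set for the EL1&0 regime a tag in bits
      * [63:56] of a TTBR0-region virtual address is ignored for
      * translation purposes. At EL2 and EL3 there is a single TBI bit
      * (TCR bit 20, tested above) and no TTBR1 region, which is why
      * arm_regime_tbi1() is always 0 there.
      */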
6819 
6820 /* Return the TTBR associated with this translation regime */
6821 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
6822                                    int ttbrn)
6823 {
6824     if (mmu_idx == ARMMMUIdx_S2NS) {
6825         return env->cp15.vttbr_el2;
6826     }
6827     if (ttbrn == 0) {
6828         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
6829     } else {
6830         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
6831     }
6832 }
6833 
6834 /* Return true if the translation regime is using LPAE format page tables */
6835 static inline bool regime_using_lpae_format(CPUARMState *env,
6836                                             ARMMMUIdx mmu_idx)
6837 {
6838     int el = regime_el(env, mmu_idx);
6839     if (el == 2 || arm_el_is_aa64(env, el)) {
6840         return true;
6841     }
6842     if (arm_feature(env, ARM_FEATURE_LPAE)
6843         && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
6844         return true;
6845     }
6846     return false;
6847 }
6848 
6849 /* Returns true if the stage 1 translation regime is using LPAE format page
6850  * tables. Used when raising alignment exceptions, whose FSR changes depending
6851  * on whether the long or short descriptor format is in use. */
6852 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
6853 {
6854     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
6855         mmu_idx += ARMMMUIdx_S1NSE0;
6856     }
6857 
6858     return regime_using_lpae_format(env, mmu_idx);
6859 }
6860 
6861 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
6862 {
6863     switch (mmu_idx) {
6864     case ARMMMUIdx_S1SE0:
6865     case ARMMMUIdx_S1NSE0:
6866         return true;
6867     default:
6868         return false;
6869     case ARMMMUIdx_S12NSE0:
6870     case ARMMMUIdx_S12NSE1:
6871         g_assert_not_reached();
6872     }
6873 }
6874 
6875 /* Translate section/page access permissions to page
6876  * R/W protection flags
6877  *
6878  * @env:         CPUARMState
6879  * @mmu_idx:     MMU index indicating required translation regime
6880  * @ap:          The 3-bit access permissions (AP[2:0])
6881  * @domain_prot: The 2-bit domain access permissions
6882  */
6883 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
6884                                 int ap, int domain_prot)
6885 {
6886     bool is_user = regime_is_user(env, mmu_idx);
6887 
6888     if (domain_prot == 3) {
6889         return PAGE_READ | PAGE_WRITE;
6890     }
6891 
6892     switch (ap) {
6893     case 0:
6894         if (arm_feature(env, ARM_FEATURE_V7)) {
6895             return 0;
6896         }
6897         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
6898         case SCTLR_S:
6899             return is_user ? 0 : PAGE_READ;
6900         case SCTLR_R:
6901             return PAGE_READ;
6902         default:
6903             return 0;
6904         }
6905     case 1:
6906         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
6907     case 2:
6908         if (is_user) {
6909             return PAGE_READ;
6910         } else {
6911             return PAGE_READ | PAGE_WRITE;
6912         }
6913     case 3:
6914         return PAGE_READ | PAGE_WRITE;
6915     case 4: /* Reserved.  */
6916         return 0;
6917     case 5:
6918         return is_user ? 0 : PAGE_READ;
6919     case 6:
6920         return PAGE_READ;
6921     case 7:
6922         if (!arm_feature(env, ARM_FEATURE_V6K)) {
6923             return 0;
6924         }
6925         return PAGE_READ;
6926     default:
6927         g_assert_not_reached();
6928     }
6929 }
6930 
6931 /* Translate section/page access permissions to page
6932  * R/W protection flags.
6933  *
6934  * @ap:      The 2-bit simple AP (AP[2:1])
6935  * @is_user: TRUE if accessing from PL0
6936  */
6937 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
6938 {
6939     switch (ap) {
6940     case 0:
6941         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
6942     case 1:
6943         return PAGE_READ | PAGE_WRITE;
6944     case 2:
6945         return is_user ? 0 : PAGE_READ;
6946     case 3:
6947         return PAGE_READ;
6948     default:
6949         g_assert_not_reached();
6950     }
6951 }
6952 
6953 static inline int
6954 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
6955 {
6956     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
6957 }
6958 
6959 /* Translate S2 section/page access permissions to protection flags
6960  *
6961  * @env:     CPUARMState
6962  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
6963  * @xn:      XN (execute-never) bit
6964  */
6965 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
6966 {
6967     int prot = 0;
6968 
6969     if (s2ap & 1) {
6970         prot |= PAGE_READ;
6971     }
6972     if (s2ap & 2) {
6973         prot |= PAGE_WRITE;
6974     }
6975     if (!xn) {
6976         if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
6977             prot |= PAGE_EXEC;
6978         }
6979     }
6980     return prot;
6981 }
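
     /* E.g. S2AP == 2 (stage 2 write-only) with XN clear is executable
      * only when EL2 is AArch64, since the AArch32 case additionally
      * requires stage 2 read permission before PAGE_EXEC is granted.
      */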
6982 
6983 /* Translate section/page access permissions to protection flags
6984  *
6985  * @env:     CPUARMState
6986  * @mmu_idx: MMU index indicating required translation regime
6987  * @is_aa64: TRUE if AArch64
6988  * @ap:      The 2-bit simple AP (AP[2:1])
6989  * @ns:      NS (non-secure) bit
6990  * @xn:      XN (execute-never) bit
6991  * @pxn:     PXN (privileged execute-never) bit
6992  */
6993 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
6994                       int ap, int ns, int xn, int pxn)
6995 {
6996     bool is_user = regime_is_user(env, mmu_idx);
6997     int prot_rw, user_rw;
6998     bool have_wxn;
6999     int wxn = 0;
7000 
7001     assert(mmu_idx != ARMMMUIdx_S2NS);
7002 
7003     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
7004     if (is_user) {
7005         prot_rw = user_rw;
7006     } else {
7007         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
7008     }
7009 
7010     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
7011         return prot_rw;
7012     }
7013 
7014     /* TODO have_wxn should be replaced with
7015      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
7016      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
7017      * compatible processors have EL2, which is required for [U]WXN.
7018      */
7019     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
7020 
7021     if (have_wxn) {
7022         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
7023     }
7024 
7025     if (is_aa64) {
7026         switch (regime_el(env, mmu_idx)) {
7027         case 1:
7028             if (!is_user) {
7029                 xn = pxn || (user_rw & PAGE_WRITE);
7030             }
7031             break;
7032         case 2:
7033         case 3:
7034             break;
7035         }
7036     } else if (arm_feature(env, ARM_FEATURE_V7)) {
7037         switch (regime_el(env, mmu_idx)) {
7038         case 1:
7039         case 3:
7040             if (is_user) {
7041                 xn = xn || !(user_rw & PAGE_READ);
7042             } else {
7043                 int uwxn = 0;
7044                 if (have_wxn) {
7045                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
7046                 }
7047                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
7048                      (uwxn && (user_rw & PAGE_WRITE));
7049             }
7050             break;
7051         case 2:
7052             break;
7053         }
7054     } else {
7055         xn = wxn = 0;
7056     }
7057 
7058     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
7059         return prot_rw;
7060     }
7061     return prot_rw | PAGE_EXEC;
7062 }
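
     /* Note the AArch64 behaviour encoded above: at EL1 a page writable
      * from EL0 is never privileged-executable, and with SCTLR.WXN set
      * any writable page loses execute permission as well; e.g. AP == 1
      * (RW at EL1 and EL0) plus WXN yields read/write but never
      * PAGE_EXEC.
      */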
7063 
7064 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
7065                                      uint32_t *table, uint32_t address)
7066 {
7067     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
7068     TCR *tcr = regime_tcr(env, mmu_idx);
7069 
7070     if (address & tcr->mask) {
7071         if (tcr->raw_tcr & TTBCR_PD1) {
7072             /* Translation table walk disabled for TTBR1 */
7073             return false;
7074         }
7075         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
7076     } else {
7077         if (tcr->raw_tcr & TTBCR_PD0) {
7078             /* Translation table walk disabled for TTBR0 */
7079             return false;
7080         }
7081         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
7082     }
7083     *table |= (address >> 18) & 0x3ffc;
7084     return true;
7085 }
7086 
7087 /* Translate a S1 pagetable walk through S2 if needed.  */
7088 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
7089                                hwaddr addr, MemTxAttrs txattrs,
7090                                uint32_t *fsr,
7091                                ARMMMUFaultInfo *fi)
7092 {
7093     if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
7094         !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
7095         target_ulong s2size;
7096         hwaddr s2pa;
7097         int s2prot;
7098         int ret;
7099 
7100         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
7101                                  &txattrs, &s2prot, &s2size, fsr, fi);
7102         if (ret) {
7103             fi->s2addr = addr;
7104             fi->stage2 = true;
7105             fi->s1ptw = true;
7106             return ~0;
7107         }
7108         addr = s2pa;
7109     }
7110     return addr;
7111 }
7112 
7113 /* All loads done in the course of a page table walk go through here.
7114  * TODO: rather than ignoring errors from physical memory reads (which
7115  * are external aborts in ARM terminology) we should propagate this
7116  * error out so that we can turn it into a Data Abort if this walk
7117  * was being done for a CPU load/store or an address translation instruction
7118  * (but not if it was for a debug access).
7119  */
7120 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7121                             ARMMMUIdx mmu_idx, uint32_t *fsr,
7122                             ARMMMUFaultInfo *fi)
7123 {
7124     ARMCPU *cpu = ARM_CPU(cs);
7125     CPUARMState *env = &cpu->env;
7126     MemTxAttrs attrs = {};
7127     AddressSpace *as;
7128 
7129     attrs.secure = is_secure;
7130     as = arm_addressspace(cs, attrs);
7131     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7132     if (fi->s1ptw) {
7133         return 0;
7134     }
7135     if (regime_translation_big_endian(env, mmu_idx)) {
7136         return address_space_ldl_be(as, addr, attrs, NULL);
7137     } else {
7138         return address_space_ldl_le(as, addr, attrs, NULL);
7139     }
7140 }
7141 
7142 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7143                             ARMMMUIdx mmu_idx, uint32_t *fsr,
7144                             ARMMMUFaultInfo *fi)
7145 {
7146     ARMCPU *cpu = ARM_CPU(cs);
7147     CPUARMState *env = &cpu->env;
7148     MemTxAttrs attrs = {};
7149     AddressSpace *as;
7150 
7151     attrs.secure = is_secure;
7152     as = arm_addressspace(cs, attrs);
7153     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7154     if (fi->s1ptw) {
7155         return 0;
7156     }
7157     if (regime_translation_big_endian(env, mmu_idx)) {
7158         return address_space_ldq_be(as, addr, attrs, NULL);
7159     } else {
7160         return address_space_ldq_le(as, addr, attrs, NULL);
7161     }
7162 }
7163 
7164 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
7165                              int access_type, ARMMMUIdx mmu_idx,
7166                              hwaddr *phys_ptr, int *prot,
7167                              target_ulong *page_size, uint32_t *fsr,
7168                              ARMMMUFaultInfo *fi)
7169 {
7170     CPUState *cs = CPU(arm_env_get_cpu(env));
7171     int code;
7172     uint32_t table;
7173     uint32_t desc;
7174     int type;
7175     int ap;
7176     int domain = 0;
7177     int domain_prot;
7178     hwaddr phys_addr;
7179     uint32_t dacr;
7180 
7181     /* Pagetable walk.  */
7182     /* Lookup l1 descriptor.  */
7183     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7184         /* Section translation fault if page walk is disabled by PD0 or PD1 */
7185         code = 5;
7186         goto do_fault;
7187     }
7188     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7189                        mmu_idx, fsr, fi);
7190     type = (desc & 3);
7191     domain = (desc >> 5) & 0x0f;
7192     if (regime_el(env, mmu_idx) == 1) {
7193         dacr = env->cp15.dacr_ns;
7194     } else {
7195         dacr = env->cp15.dacr_s;
7196     }
7197     domain_prot = (dacr >> (domain * 2)) & 3;
7198     if (type == 0) {
7199         /* Section translation fault.  */
7200         code = 5;
7201         goto do_fault;
7202     }
7203     if (domain_prot == 0 || domain_prot == 2) {
7204         if (type == 2) {
7205             code = 9; /* Section domain fault.  */
7206         } else {
7207             code = 11; /* Page domain fault.  */
             }
7208         goto do_fault;
7209     }
7210     if (type == 2) {
7211         /* 1Mb section.  */
7212         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7213         ap = (desc >> 10) & 3;
7214         code = 13;
7215         *page_size = 1024 * 1024;
7216     } else {
7217         /* Lookup l2 entry.  */
7218         if (type == 1) {
7219             /* Coarse pagetable.  */
7220             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7221         } else {
7222             /* Fine pagetable.  */
7223             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
7224         }
7225         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7226                            mmu_idx, fsr, fi);
7227         switch (desc & 3) {
7228         case 0: /* Page translation fault.  */
7229             code = 7;
7230             goto do_fault;
7231         case 1: /* 64k page.  */
7232             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7233             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
7234             *page_size = 0x10000;
7235             break;
7236         case 2: /* 4k page.  */
7237             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7238             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
7239             *page_size = 0x1000;
7240             break;
7241         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
7242             if (type == 1) {
7243                 /* ARMv6/XScale extended small page format */
7244                 if (arm_feature(env, ARM_FEATURE_XSCALE)
7245                     || arm_feature(env, ARM_FEATURE_V6)) {
7246                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7247                     *page_size = 0x1000;
7248                 } else {
7249                     /* UNPREDICTABLE in ARMv5; we choose to take a
7250                      * page translation fault.
7251                      */
7252                     code = 7;
7253                     goto do_fault;
7254                 }
7255             } else {
7256                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
7257                 *page_size = 0x400;
7258             }
7259             ap = (desc >> 4) & 3;
7260             break;
7261         default:
7262             /* Never happens, but compiler isn't smart enough to tell.  */
7263             abort();
7264         }
7265         code = 15;
7266     }
7267     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
7268     *prot |= *prot ? PAGE_EXEC : 0;
7269     if (!(*prot & (1 << access_type))) {
7270         /* Access permission fault.  */
7271         goto do_fault;
7272     }
7273     *phys_ptr = phys_addr;
7274     return false;
7275 do_fault:
7276     *fsr = code | (domain << 4);
7277     return true;
7278 }
7279 
7280 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
7281                              int access_type, ARMMMUIdx mmu_idx,
7282                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
7283                              target_ulong *page_size, uint32_t *fsr,
7284                              ARMMMUFaultInfo *fi)
7285 {
7286     CPUState *cs = CPU(arm_env_get_cpu(env));
7287     int code;
7288     uint32_t table;
7289     uint32_t desc;
7290     uint32_t xn;
7291     uint32_t pxn = 0;
7292     int type;
7293     int ap;
7294     int domain = 0;
7295     int domain_prot;
7296     hwaddr phys_addr;
7297     uint32_t dacr;
7298     bool ns;
7299 
7300     /* Pagetable walk.  */
7301     /* Lookup l1 descriptor.  */
7302     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7303         /* Section translation fault if page walk is disabled by PD0 or PD1 */
7304         code = 5;
7305         goto do_fault;
7306     }
7307     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7308                        mmu_idx, fsr, fi);
7309     type = (desc & 3);
7310     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
7311         /* Section translation fault, or attempt to use the encoding
7312          * which is Reserved on implementations without PXN.
7313          */
7314         code = 5;
7315         goto do_fault;
7316     }
7317     if ((type == 1) || !(desc & (1 << 18))) {
7318         /* Page or Section.  */
7319         domain = (desc >> 5) & 0x0f;
7320     }
7321     if (regime_el(env, mmu_idx) == 1) {
7322         dacr = env->cp15.dacr_ns;
7323     } else {
7324         dacr = env->cp15.dacr_s;
7325     }
7326     domain_prot = (dacr >> (domain * 2)) & 3;
7327     if (domain_prot == 0 || domain_prot == 2) {
7328         if (type != 1) {
7329             code = 9; /* Section domain fault.  */
7330         } else {
7331             code = 11; /* Page domain fault.  */
7332         }
7333         goto do_fault;
7334     }
7335     if (type != 1) {
7336         if (desc & (1 << 18)) {
7337             /* Supersection.  */
7338             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
7339             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
7340             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
7341             *page_size = 0x1000000;
7342         } else {
7343             /* Section.  */
7344             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7345             *page_size = 0x100000;
7346         }
7347         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
7348         xn = desc & (1 << 4);
7349         pxn = desc & 1;
7350         code = 13;
7351         ns = extract32(desc, 19, 1);
7352     } else {
7353         if (arm_feature(env, ARM_FEATURE_PXN)) {
7354             pxn = (desc >> 2) & 1;
7355         }
7356         ns = extract32(desc, 3, 1);
7357         /* Lookup l2 entry.  */
7358         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7359         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7360                            mmu_idx, fsr, fi);
7361         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
7362         switch (desc & 3) {
7363         case 0: /* Page translation fault.  */
7364             code = 7;
7365             goto do_fault;
7366         case 1: /* 64k page.  */
7367             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7368             xn = desc & (1 << 15);
7369             *page_size = 0x10000;
7370             break;
7371         case 2: case 3: /* 4k page.  */
7372             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7373             xn = desc & 1;
7374             *page_size = 0x1000;
7375             break;
7376         default:
7377             /* Never happens, but compiler isn't smart enough to tell.  */
7378             abort();
7379         }
7380         code = 15;
7381     }
7382     if (domain_prot == 3) {
7383         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
7384     } else {
7385         if (pxn && !regime_is_user(env, mmu_idx)) {
7386             xn = 1;
7387         }
7388         if (xn && access_type == 2) {
7389             goto do_fault;
             }
7390 
7391         if (arm_feature(env, ARM_FEATURE_V6K) &&
7392                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
7393             /* The simplified model uses AP[0] as an access control bit.  */
7394             if ((ap & 1) == 0) {
7395                 /* Access flag fault.  */
7396                 code = (code == 15) ? 6 : 3;
7397                 goto do_fault;
7398             }
7399             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
7400         } else {
7401             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
7402         }
7403         if (*prot && !xn) {
7404             *prot |= PAGE_EXEC;
7405         }
7406         if (!(*prot & (1 << access_type))) {
7407             /* Access permission fault.  */
7408             goto do_fault;
7409         }
7410     }
7411     if (ns) {
7412         /* The NS bit will (as required by the architecture) have no effect if
7413          * the CPU doesn't support TZ or this is a non-secure translation
7414          * regime, because the attribute will already be non-secure.
7415          */
7416         attrs->secure = false;
7417     }
7418     *phys_ptr = phys_addr;
7419     return false;
7420 do_fault:
7421     *fsr = code | (domain << 4);
7422     return true;
7423 }
7424 
7425 /* Fault type for long-descriptor MMU fault reporting; this corresponds
7426  * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
7427  */
7428 typedef enum {
7429     translation_fault = 1,
7430     access_fault = 2,
7431     permission_fault = 3,
7432 } MMUFaultType;
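
     /* These values feed the long-format FSR built at the do_fault exit
      * of get_phys_addr_lpae(): e.g. a level 1 translation fault is
      * reported as (1 << 9) | (translation_fault << 2) | 1 = 0x205, bit 9
      * being the LPAE-format flag.
      */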
7433 
7434 /*
7435  * check_s2_mmu_setup
7436  * @cpu:        ARMCPU
7437  * @is_aa64:    True if the translation regime is in AArch64 state
7438  * @startlevel: Suggested starting level
7439  * @inputsize:  Bitsize of IPAs
7440  * @stride:     Page-table stride (See the ARM ARM)
7441  *
7442  * Returns true if the suggested S2 translation parameters are OK and
7443  * false otherwise.
7444  */
7445 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
7446                                int inputsize, int stride)
7447 {
7448     const int grainsize = stride + 3;
7449     int startsizecheck;
7450 
7451     /* Negative levels are never allowed.  */
7452     if (level < 0) {
7453         return false;
7454     }
7455 
7456     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
7457     if (startsizecheck < 1 || startsizecheck > stride + 4) {
7458         return false;
7459     }
7460 
7461     if (is_aa64) {
7462         CPUARMState *env = &cpu->env;
7463         unsigned int pamax = arm_pamax(cpu);
7464 
7465         switch (stride) {
7466         case 13: /* 64KB Pages.  */
7467             if (level == 0 || (level == 1 && pamax <= 42)) {
7468                 return false;
7469             }
7470             break;
7471         case 11: /* 16KB Pages.  */
7472             if (level == 0 || (level == 1 && pamax <= 40)) {
7473                 return false;
7474             }
7475             break;
7476         case 9: /* 4KB Pages.  */
7477             if (level == 0 && pamax <= 42) {
7478                 return false;
7479             }
7480             break;
7481         default:
7482             g_assert_not_reached();
7483         }
7484 
7485         /* Inputsize checks.  */
7486         if (inputsize > pamax &&
7487             (arm_el_is_aa64(env, 1) || inputsize > 40)) {
7488             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
7489             return false;
7490         }
7491     } else {
7492         /* AArch32 only supports 4KB pages. Assert on that.  */
7493         assert(stride == 9);
7494 
7495         if (level == 0) {
7496             return false;
7497         }
7498     }
7499     return true;
7500 }
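
     /* For illustration: a 4KB granule (stride 9, grainsize 12), a 40-bit
      * IPA space and a suggested starting level of 1 give
      * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which is within the
      * permitted [1, stride + 4] range, so the setup is accepted.
      */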
7501 
7502 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
7503                                int access_type, ARMMMUIdx mmu_idx,
7504                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
7505                                target_ulong *page_size_ptr, uint32_t *fsr,
7506                                ARMMMUFaultInfo *fi)
7507 {
7508     ARMCPU *cpu = arm_env_get_cpu(env);
7509     CPUState *cs = CPU(cpu);
7510     /* Read an LPAE long-descriptor translation table. */
7511     MMUFaultType fault_type = translation_fault;
7512     uint32_t level;
7513     uint32_t epd = 0;
7514     int32_t t0sz, t1sz;
7515     uint32_t tg;
7516     uint64_t ttbr;
7517     int ttbr_select;
7518     hwaddr descaddr, indexmask, indexmask_grainsize;
7519     uint32_t tableattrs;
7520     target_ulong page_size;
7521     uint32_t attrs;
7522     int32_t stride = 9;
7523     int32_t addrsize;
7524     int inputsize;
7525     int32_t tbi = 0;
7526     TCR *tcr = regime_tcr(env, mmu_idx);
7527     int ap, ns, xn, pxn;
7528     uint32_t el = regime_el(env, mmu_idx);
7529     bool ttbr1_valid = true;
7530     uint64_t descaddrmask;
7531     bool aarch64 = arm_el_is_aa64(env, el);
7532 
7533     /* TODO:
7534      * This code does not handle the different format TCR for VTCR_EL2.
7535      * This code also does not support shareability levels.
7536      * Attribute and permission bit handling should also be checked when adding
7537      * support for those page table walks.
7538      */
7539     if (aarch64) {
7540         level = 0;
7541         addrsize = 64;
7542         if (el > 1) {
7543             if (mmu_idx != ARMMMUIdx_S2NS) {
7544                 tbi = extract64(tcr->raw_tcr, 20, 1);
7545             }
7546         } else {
7547             if (extract64(address, 55, 1)) {
7548                 tbi = extract64(tcr->raw_tcr, 38, 1);
7549             } else {
7550                 tbi = extract64(tcr->raw_tcr, 37, 1);
7551             }
7552         }
7553         tbi *= 8;
7554 
7555         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
7556          * invalid.
7557          */
7558         if (el > 1) {
7559             ttbr1_valid = false;
7560         }
7561     } else {
7562         level = 1;
7563         addrsize = 32;
7564         /* There is no TTBR1 for EL2 */
7565         if (el == 2) {
7566             ttbr1_valid = false;
7567         }
7568     }
7569 
7570     /* Determine whether this address is in the region controlled by
7571      * TTBR0 or TTBR1 (or if it is in neither region and should fault).
7572      * This is a Non-secure PL0/1 stage 1 translation, so controlled by
7573      * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
7574      */
7575     if (aarch64) {
7576         /* AArch64 translation.  */
7577         t0sz = extract32(tcr->raw_tcr, 0, 6);
7578         t0sz = MIN(t0sz, 39);
7579         t0sz = MAX(t0sz, 16);
7580     } else if (mmu_idx != ARMMMUIdx_S2NS) {
7581         /* AArch32 stage 1 translation.  */
7582         t0sz = extract32(tcr->raw_tcr, 0, 3);
7583     } else {
7584         /* AArch32 stage 2 translation.  */
7585         bool sext = extract32(tcr->raw_tcr, 4, 1);
7586         bool sign = extract32(tcr->raw_tcr, 3, 1);
7587         /* Address size is 40-bit for a stage 2 translation,
7588          * and t0sz can be negative (from -8 to 7),
7589          * so we need to adjust it to use the TTBR selecting logic below.
7590          */
7591         addrsize = 40;
7592         t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
7593 
7594         /* If the sign-extend bit is not the same as t0sz[3], the result
7595          * is unpredictable. Flag this as a guest error.  */
7596         if (sign != sext) {
7597             qemu_log_mask(LOG_GUEST_ERROR,
7598                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
7599         }
7600     }
7601     t1sz = extract32(tcr->raw_tcr, 16, 6);
7602     if (aarch64) {
7603         t1sz = MIN(t1sz, 39);
7604         t1sz = MAX(t1sz, 16);
7605     }
7606     if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
7607         /* there is a ttbr0 region and we are in it (high bits all zero) */
7608         ttbr_select = 0;
7609     } else if (ttbr1_valid && t1sz &&
7610                !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
7611         /* there is a ttbr1 region and we are in it (high bits all one) */
7612         ttbr_select = 1;
7613     } else if (!t0sz) {
7614         /* ttbr0 region is "everything not in the ttbr1 region" */
7615         ttbr_select = 0;
7616     } else if (!t1sz && ttbr1_valid) {
7617         /* ttbr1 region is "everything not in the ttbr0 region" */
7618         ttbr_select = 1;
7619     } else {
7620         /* in the gap between the two regions, this is a Translation fault */
7621         fault_type = translation_fault;
7622         goto do_fault;
7623     }
7624 
7625     /* Note that QEMU ignores shareability and cacheability attributes,
7626      * so we don't need to do anything with the SH, ORGN, IRGN fields
7627      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
7628      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
7629      * implement any ASID-like capability so we can ignore it (instead
7630      * we will always flush the TLB any time the ASID is changed).
7631      */
7632     if (ttbr_select == 0) {
7633         ttbr = regime_ttbr(env, mmu_idx, 0);
7634         if (el < 2) {
7635             epd = extract32(tcr->raw_tcr, 7, 1);
7636         }
7637         inputsize = addrsize - t0sz;
7638 
7639         tg = extract32(tcr->raw_tcr, 14, 2);
7640         if (tg == 1) { /* 64KB pages */
7641             stride = 13;
7642         }
7643         if (tg == 2) { /* 16KB pages */
7644             stride = 11;
7645         }
7646     } else {
7647         /* We should only be here if TTBR1 is valid */
7648         assert(ttbr1_valid);
7649 
7650         ttbr = regime_ttbr(env, mmu_idx, 1);
7651         epd = extract32(tcr->raw_tcr, 23, 1);
7652         inputsize = addrsize - t1sz;
7653 
7654         tg = extract32(tcr->raw_tcr, 30, 2);
7655         if (tg == 3)  { /* 64KB pages */
7656             stride = 13;
7657         }
7658         if (tg == 1) { /* 16KB pages */
7659             stride = 11;
7660         }
7661     }
7662 
7663     /* Here we should have set up all the parameters for the translation:
7664      * inputsize, ttbr, epd, stride, tbi
7665      */
7666 
7667     if (epd) {
7668         /* Translation table walk disabled => Translation fault on TLB miss
7669          * Note: This is always 0 on 64-bit EL2 and EL3.
7670          */
7671         goto do_fault;
7672     }
7673 
7674     if (mmu_idx != ARMMMUIdx_S2NS) {
7675         /* The starting level depends on the virtual address size (which can
7676          * be up to 48 bits) and the translation granule size. It indicates
7677          * the number of strides (stride bits at a time) needed to
7678          * consume the bits of the input address. In the pseudocode this is:
7679          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
7680          * where their 'inputsize' is our 'inputsize', 'grainsize' is
7681          * our 'stride + 3' and 'stride' is our 'stride'.
7682          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
7683          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
7684          * = 4 - (inputsize - 4) / stride;
7685          */
7686         level = 4 - (inputsize - 4) / stride;
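             /* E.g. a 48-bit inputsize with 4KB pages (stride 9) starts
              * at level 4 - 44 / 9 = 0, while 64KB pages (stride 13)
              * start at level 4 - 44 / 13 = 1.
              */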
7687     } else {
7688         /* For stage 2 translations the starting level is specified by the
7689          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
7690          */
7691         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
7692         uint32_t startlevel;
7693         bool ok;
7694 
7695         if (!aarch64 || stride == 9) {
7696             /* AArch32 or 4KB pages */
7697             startlevel = 2 - sl0;
7698         } else {
7699             /* 16KB or 64KB pages */
7700             startlevel = 3 - sl0;
7701         }
7702 
7703         /* Check that the starting level is valid. */
7704         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
7705                                 inputsize, stride);
7706         if (!ok) {
7707             fault_type = translation_fault;
7708             goto do_fault;
7709         }
7710         level = startlevel;
7711     }
7712 
7713     indexmask_grainsize = (1ULL << (stride + 3)) - 1;
7714     indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
7715 
7716     /* Now we can extract the actual base address from the TTBR */
7717     descaddr = extract64(ttbr, 0, 48);
7718     descaddr &= ~indexmask;
7719 
7720     /* The address field in the descriptor goes up to bit 39 for ARMv7
7721      * and up to bit 47 for ARMv8; we only mask up to bit 39 for AArch32
7722      * because the higher bits are not needed to construct the next
7723      * descriptor address in that case (they should all be zero anyway).
7724      */
7725     descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
7726                    ~indexmask_grainsize;
7727 
7728     /* Secure accesses start with the page table in secure memory and
7729      * can be downgraded to non-secure at any step. Non-secure accesses
7730      * remain non-secure. We implement this by just ORing in the NSTable/NS
7731      * bits at each step.
7732      */
7733     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
7734     for (;;) {
7735         uint64_t descriptor;
7736         bool nstable;
7737 
7738         descaddr |= (address >> (stride * (4 - level))) & indexmask;
7739         descaddr &= ~7ULL;
7740         nstable = extract32(tableattrs, 4, 1);
7741         descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
7742         if (fi->s1ptw) {
7743             goto do_fault;
7744         }
7745 
7746         if (!(descriptor & 1) ||
7747             (!(descriptor & 2) && (level == 3))) {
7748             /* Invalid, or the Reserved level 3 encoding */
7749             goto do_fault;
7750         }
7751         descaddr = descriptor & descaddrmask;
7752 
7753         if ((descriptor & 2) && (level < 3)) {
7754             /* Table entry. The top five bits are attributes which may
7755              * propagate down through lower levels of the table (and
7756              * which are all arranged so that 0 means "no effect", so
7757              * we can gather them up by ORing in the bits at each level).
7758              */
7759             tableattrs |= extract64(descriptor, 59, 5);
7760             level++;
7761             indexmask = indexmask_grainsize;
7762             continue;
7763         }
7764         /* Block entry at level 1 or 2, or page entry at level 3.
7765          * These are basically the same thing, although the number
7766          * of bits we pull in from the vaddr varies.
7767          */
7768         page_size = (1ULL << ((stride * (4 - level)) + 3));
7769         descaddr |= (address & (page_size - 1));
7770         /* Extract attributes from the descriptor */
7771         attrs = extract64(descriptor, 2, 10)
7772             | (extract64(descriptor, 52, 12) << 10);
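             /* In this packed format descriptor bit (2 + n) becomes attrs
              * bit n and descriptor bit (52 + n) becomes attrs bit
              * (10 + n): e.g. AP[2:1] (descriptor bits 7:6) lands in
              * attrs[5:4], the access flag (bit 10) in attrs[8] and
              * XN/PXN (bits 54:53) in attrs[12:11], matching the
              * extractions done below.
              */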
7773 
7774         if (mmu_idx == ARMMMUIdx_S2NS) {
7775             /* Stage 2 table descriptors do not include any attribute fields */
7776             break;
7777         }
7778         /* Merge in attributes from table descriptors */
7779         attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
7780         attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
7781         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
7782          * means "force PL1 access only", which means forcing AP[1] to 0.
7783          */
7784         if (extract32(tableattrs, 2, 1)) {
7785             attrs &= ~(1 << 4);
7786         }
7787         attrs |= nstable << 3; /* NS */
7788         break;
7789     }
7790     /* Here descaddr is the final physical address, and attributes
7791      * are all in attrs.
7792      */
7793     fault_type = access_fault;
7794     if ((attrs & (1 << 8)) == 0) {
7795         /* Access flag */
7796         goto do_fault;
7797     }
7798 
7799     ap = extract32(attrs, 4, 2);
7800     xn = extract32(attrs, 12, 1);
7801 
7802     if (mmu_idx == ARMMMUIdx_S2NS) {
7803         ns = true;
7804         *prot = get_S2prot(env, ap, xn);
7805     } else {
7806         ns = extract32(attrs, 3, 1);
7807         pxn = extract32(attrs, 11, 1);
7808         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
7809     }
7810 
7811     fault_type = permission_fault;
7812     if (!(*prot & (1 << access_type))) {
7813         goto do_fault;
7814     }
7815 
7816     if (ns) {
7817         /* The NS bit will (as required by the architecture) have no effect if
7818          * the CPU doesn't support TZ or this is a non-secure translation
7819          * regime, because the attribute will already be non-secure.
7820          */
7821         txattrs->secure = false;
7822     }
7823     *phys_ptr = descaddr;
7824     *page_size_ptr = page_size;
7825     return false;
7826 
7827 do_fault:
7828     /* Long-descriptor format IFSR/DFSR value */
7829     *fsr = (1 << 9) | (fault_type << 2) | level;
7830     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
7831     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
7832     return true;
7833 }
7834 
7835 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
7836                                                 ARMMMUIdx mmu_idx,
7837                                                 int32_t address, int *prot)
7838 {
7839     *prot = PAGE_READ | PAGE_WRITE;
7840     switch (address) {
7841     case 0xF0000000 ... 0xFFFFFFFF:
7842         if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs: executing here is OK */
7843             *prot |= PAGE_EXEC;
7844         }
7845         break;
7846     case 0x00000000 ... 0x7FFFFFFF:
7847         *prot |= PAGE_EXEC;
7848         break;
7849     }
7851 }
7852 
7853 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
7854                                  int access_type, ARMMMUIdx mmu_idx,
7855                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
7856 {
7857     ARMCPU *cpu = arm_env_get_cpu(env);
7858     int n;
7859     bool is_user = regime_is_user(env, mmu_idx);
7860 
7861     *phys_ptr = address;
7862     *prot = 0;
7863 
7864     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
7865         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
7866     } else { /* MPU enabled */
7867         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
7868             /* region search */
7869             uint32_t base = env->pmsav7.drbar[n];
7870             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
7871             uint32_t rmask;
7872             bool srdis = false;
7873 
7874             if (!(env->pmsav7.drsr[n] & 0x1)) {
7875                 continue;
7876             }
7877 
7878             if (!rsize) {
7879                 qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field cannot be 0\n");
7880                 continue;
7881             }
7882             rsize++;
7883             rmask = (1ull << rsize) - 1;
7884 
7885             if (base & rmask) {
7886                 qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
7887                               "to DRSR region size, mask = %" PRIx32,
7888                               base, rmask);
7889                 continue;
7890             }
7891 
7892             if (address < base || address > base + rmask) {
7893                 continue;
7894             }
7895 
7896             /* Region matched */
7897 
7898             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
7899                 int i, snd;
7900                 uint32_t srdis_mask;
7901 
7902                 rsize -= 3; /* sub region size (power of 2) */
7903                 snd = ((address - base) >> rsize) & 0x7;
7904                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
7905 
7906                 srdis_mask = srdis ? 0x3 : 0x0;
7907                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
7908                     /* This will check in groups of 2, 4 and then 8, whether
7909                      * the subregion bits are consistent. rsize is incremented
7910                      * back up to give the region size, considering consistent
7911                      * adjacent subregions as one region. Stop testing if rsize
7912                      * is already big enough for an entire QEMU page.
7913                      */
7914                     int snd_rounded = snd & ~(i - 1);
7915                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
7916                                                      snd_rounded + 8, i);
7917                     if (srdis_mask ^ srdis_multi) {
7918                         break;
7919                     }
7920                     srdis_mask = (srdis_mask << i) | srdis_mask;
7921                     rsize++;
7922                 }
7923             }
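                 /* At this point rsize has been raised towards the log2
                  * size of the contiguous run of identically-configured
                  * subregions around the hit, so the page size check
                  * below tests the effective mappable granule rather
                  * than the raw subregion size.
                  */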
7924             if (rsize < TARGET_PAGE_BITS) {
7925                 qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region "
7926                               "alignment of %" PRIu32 " bits. Minimum is %d\n",
7927                               rsize, TARGET_PAGE_BITS);
7928                 continue;
7929             }
7930             if (srdis) {
7931                 continue;
7932             }
7933             break;
7934         }
7935 
7936         if (n == -1) { /* no hits */
7937             if (cpu->pmsav7_dregion &&
7938                 (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
7939                 /* background fault */
7940                 *fsr = 0;
7941                 return true;
7942             }
7943             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
7944         } else { /* an MPU hit! */
7945             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
7946 
7947             if (is_user) { /* User mode AP bit decoding */
7948                 switch (ap) {
7949                 case 0:
7950                 case 1:
7951                 case 5:
7952                     break; /* no access */
7953                 case 3:
7954                     *prot |= PAGE_WRITE;
7955                     /* fall through */
7956                 case 2:
7957                 case 6:
7958                     *prot |= PAGE_READ | PAGE_EXEC;
7959                     break;
7960                 default:
7961                     qemu_log_mask(LOG_GUEST_ERROR,
7962                                   "Bad value for AP bits in DRACR %"
7963                                   PRIx32 "\n", ap);
7964                 }
7965             } else { /* Priv. mode AP bits decoding */
7966                 switch (ap) {
7967                 case 0:
7968                     break; /* no access */
7969                 case 1:
7970                 case 2:
7971                 case 3:
7972                     *prot |= PAGE_WRITE;
7973                     /* fall through */
7974                 case 5:
7975                 case 6:
7976                     *prot |= PAGE_READ | PAGE_EXEC;
7977                     break;
7978                 default:
7979                     qemu_log_mask(LOG_GUEST_ERROR,
7980                                   "Bad value for AP bits in DRACR %"
7981                                   PRIx32 "\n", ap);
7982                 }
7983             }
7984 
7985             /* execute never */
7986             if (env->pmsav7.dracr[n] & (1 << 12)) {
7987                 *prot &= ~PAGE_EXEC;
7988             }
7989         }
7990     }
7991 
7992     *fsr = 0x00d; /* Permission fault */
7993     return !(*prot & (1 << access_type));
7994 }
7995 
7996 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
7997                                  int access_type, ARMMMUIdx mmu_idx,
7998                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
7999 {
8000     int n;
8001     uint32_t mask;
8002     uint32_t base;
8003     bool is_user = regime_is_user(env, mmu_idx);
8004 
8005     *phys_ptr = address;
8006     for (n = 7; n >= 0; n--) {
8007         base = env->cp15.c6_region[n];
8008         if ((base & 1) == 0) {
8009             continue;
8010         }
8011         mask = 1 << ((base >> 1) & 0x1f);
8012         /* Keep this shift separate from the above to avoid an
8013            (undefined) << 32.  */
8014         mask = (mask << 1) - 1;
8015         if (((base ^ address) & ~mask) == 0) {
8016             break;
8017         }
8018     }
8019     if (n < 0) {
8020         *fsr = 2;
8021         return true;
8022     }
8023 
8024     if (access_type == 2) {
8025         mask = env->cp15.pmsav5_insn_ap;
8026     } else {
8027         mask = env->cp15.pmsav5_data_ap;
8028     }
8029     mask = (mask >> (n * 4)) & 0xf;
8030     switch (mask) {
8031     case 0:
8032         *fsr = 1;
8033         return true;
8034     case 1:
8035         if (is_user) {
8036             *fsr = 1;
8037             return true;
8038         }
8039         *prot = PAGE_READ | PAGE_WRITE;
8040         break;
8041     case 2:
8042         *prot = PAGE_READ;
8043         if (!is_user) {
8044             *prot |= PAGE_WRITE;
8045         }
8046         break;
8047     case 3:
8048         *prot = PAGE_READ | PAGE_WRITE;
8049         break;
8050     case 5:
8051         if (is_user) {
8052             *fsr = 1;
8053             return true;
8054         }
8055         *prot = PAGE_READ;
8056         break;
8057     case 6:
8058         *prot = PAGE_READ;
8059         break;
8060     default:
8061         /* Bad permission.  */
8062         *fsr = 1;
8063         return true;
8064     }
8065     *prot |= PAGE_EXEC;
8066     return false;
8067 }
8068 
8069 /* get_phys_addr - get the physical address for this virtual address
8070  *
8071  * Find the physical address corresponding to the given virtual address,
8072  * by doing a translation table walk on MMU based systems or using the
8073  * MPU state on MPU based systems.
8074  *
8075  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
8076  * prot and page_size may not be filled in, and the populated fsr value provides
8077  * information on why the translation aborted, in the format of a
8078  * DFSR/IFSR fault register, with the following caveats:
8079  *  * we honour the short vs long DFSR format differences.
8080  *  * the WnR bit is never set (the caller must do this).
8081  *  * for PSMAv5 based systems we don't bother to return a full FSR format
8082  *    value.
8083  *
8084  * @env: CPUARMState
8085  * @address: virtual address to get physical address for
8086  * @access_type: 0 for read, 1 for write, 2 for execute
8087  * @mmu_idx: MMU index indicating required translation regime
8088  * @phys_ptr: set to the physical address corresponding to the virtual address
8089  * @attrs: set to the memory transaction attributes to use
8090  * @prot: set to the permissions for the page containing phys_ptr
8091  * @page_size: set to the size of the page containing phys_ptr
8092  * @fsr: set to the DFSR/IFSR value on failure
8093  */
8094 static bool get_phys_addr(CPUARMState *env, target_ulong address,
8095                           int access_type, ARMMMUIdx mmu_idx,
8096                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
8097                           target_ulong *page_size, uint32_t *fsr,
8098                           ARMMMUFaultInfo *fi)
8099 {
8100     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8101         /* Call ourselves recursively to do the stage 1 and then stage 2
8102          * translations.
8103          */
8104         if (arm_feature(env, ARM_FEATURE_EL2)) {
8105             hwaddr ipa;
8106             int s2_prot;
8107             int ret;
8108 
8109             ret = get_phys_addr(env, address, access_type,
8110                                 mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
8111                                 prot, page_size, fsr, fi);
8112 
8113             /* If S1 fails or S2 is disabled, return early.  */
8114             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
8115                 *phys_ptr = ipa;
8116                 return ret;
8117             }
8118 
8119             /* S1 is done. Now do S2 translation.  */
8120             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
8121                                      phys_ptr, attrs, &s2_prot,
8122                                      page_size, fsr, fi);
8123             fi->s2addr = ipa;
8124             /* Combine the S1 and S2 perms.  */
8125             *prot &= s2_prot;
8126             return ret;
8127         } else {
8128             /* For non-EL2 CPUs a stage1+stage2 translation is just stage 1;
8129              * adding ARMMMUIdx_S1NSE0 converts S12NSE0/1 into S1NSE0/1.
8130              */
8131             mmu_idx += ARMMMUIdx_S1NSE0;
8132         }
8133     }
8134 
8135     /* The page table entries may downgrade secure to non-secure, but
8136      * cannot upgrade a non-secure translation regime's attributes
8137      * to secure.
8138      */
8139     attrs->secure = regime_is_secure(env, mmu_idx);
8140     attrs->user = regime_is_user(env, mmu_idx);
8141 
8142     /* Fast Context Switch Extension. This doesn't exist at all in v8.
8143      * In v7 and earlier it affects all stage 1 translations.
8144      */
8145     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
8146         && !arm_feature(env, ARM_FEATURE_V8)) {
8147         if (regime_el(env, mmu_idx) == 3) {
8148             address += env->cp15.fcseidr_s;
8149         } else {
8150             address += env->cp15.fcseidr_ns;
8151         }
8152     }
8153 
8154     /* pmsav7 has special handling for when MPU is disabled so call it before
8155      * the common MMU/MPU disabled check below.
8156      */
8157     if (arm_feature(env, ARM_FEATURE_MPU) &&
8158         arm_feature(env, ARM_FEATURE_V7)) {
8159         *page_size = TARGET_PAGE_SIZE;
8160         return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
8161                                     phys_ptr, prot, fsr);
8162     }
8163 
8164     if (regime_translation_disabled(env, mmu_idx)) {
8165         /* MMU/MPU disabled.  */
8166         *phys_ptr = address;
8167         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
8168         *page_size = TARGET_PAGE_SIZE;
8169         return false;
8170     }
8171 
8172     if (arm_feature(env, ARM_FEATURE_MPU)) {
8173         /* Pre-v7 MPU */
8174         *page_size = TARGET_PAGE_SIZE;
8175         return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
8176                                     phys_ptr, prot, fsr);
8177     }
8178 
8179     if (regime_using_lpae_format(env, mmu_idx)) {
8180         return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
8181                                   attrs, prot, page_size, fsr, fi);
8182     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
8183         return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
8184                                 attrs, prot, page_size, fsr, fi);
8185     } else {
8186         return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
8187                                 prot, page_size, fsr, fi);
8188     }
8189 }
8190 
8191 /* Walk the page table and (if the mapping exists) add the page
8192  * to the TLB. Return false on success, or true on failure. Populate
8193  * fsr with ARM DFSR/IFSR fault register format value on failure.
8194  */
8195 bool arm_tlb_fill(CPUState *cs, vaddr address,
8196                   int access_type, int mmu_idx, uint32_t *fsr,
8197                   ARMMMUFaultInfo *fi)
8198 {
8199     ARMCPU *cpu = ARM_CPU(cs);
8200     CPUARMState *env = &cpu->env;
8201     hwaddr phys_addr;
8202     target_ulong page_size;
8203     int prot;
8204     int ret;
8205     MemTxAttrs attrs = {};
8206 
8207     ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
8208                         &attrs, &prot, &page_size, fsr, fi);
8209     if (!ret) {
8210         /* Map a single [sub]page.  */
8211         phys_addr &= TARGET_PAGE_MASK;
8212         address &= TARGET_PAGE_MASK;
8213         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
8214                                 prot, mmu_idx, page_size);
8215         return false;
8216     }
8217 
8218     return ret;
8219 }
8220 
8221 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
8222                                          MemTxAttrs *attrs)
8223 {
8224     ARMCPU *cpu = ARM_CPU(cs);
8225     CPUARMState *env = &cpu->env;
8226     hwaddr phys_addr;
8227     target_ulong page_size;
8228     int prot;
8229     bool ret;
8230     uint32_t fsr;
8231     ARMMMUFaultInfo fi = {};
8232 
8233     *attrs = (MemTxAttrs) {};
8234 
8235     ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
8236                         attrs, &prot, &page_size, &fsr, &fi);
8237 
8238     if (ret) {
8239         return -1;
8240     }
8241     return phys_addr;
8242 }
8243 
8244 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
8245 {
8246     ARMCPU *cpu = arm_env_get_cpu(env);
8247 
8248     switch (reg) {
8249     case 0: /* APSR */
8250         return xpsr_read(env) & 0xf8000000;
8251     case 1: /* IAPSR */
8252         return xpsr_read(env) & 0xf80001ff;
8253     case 2: /* EAPSR */
8254         return xpsr_read(env) & 0xff00fc00;
8255     case 3: /* xPSR */
8256         return xpsr_read(env) & 0xff00fdff;
8257     case 5: /* IPSR */
8258         return xpsr_read(env) & 0x000001ff;
8259     case 6: /* EPSR */
8260         return xpsr_read(env) & 0x0700fc00;
8261     case 7: /* IEPSR */
8262         return xpsr_read(env) & 0x0700edff;
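    /* v7m.current_sp is set when the process stack (PSP) is the active
     * stack pointer: regs[13] always holds the active SP and other_sp the
     * inactive one, hence the swapped selects for MSP and PSP below.
     */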
8263     case 8: /* MSP */
8264         return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
8265     case 9: /* PSP */
8266         return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
8267     case 16: /* PRIMASK */
8268         return (env->daif & PSTATE_I) != 0;
8269     case 17: /* BASEPRI */
8270     case 18: /* BASEPRI_MAX */
8271         return env->v7m.basepri;
8272     case 19: /* FAULTMASK */
8273         return (env->daif & PSTATE_F) != 0;
8274     case 20: /* CONTROL */
8275         return env->v7m.control;
8276     default:
8277         /* ??? For debugging only.  */
8278         cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
8279         return 0;
8280     }
8281 }
8282 
8283 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
8284 {
8285     ARMCPU *cpu = arm_env_get_cpu(env);
8286 
8287     switch (reg) {
8288     case 0: /* APSR */
8289         xpsr_write(env, val, 0xf8000000);
8290         break;
8291     case 1: /* IAPSR */
8292         xpsr_write(env, val, 0xf8000000);
8293         break;
8294     case 2: /* EAPSR */
8295         xpsr_write(env, val, 0xfe00fc00);
8296         break;
8297     case 3: /* xPSR */
8298         xpsr_write(env, val, 0xfe00fc00);
8299         break;
8300     case 5: /* IPSR */
8301         /* IPSR bits are readonly.  */
8302         break;
8303     case 6: /* EPSR */
8304         xpsr_write(env, val, 0x0600fc00);
8305         break;
8306     case 7: /* IEPSR */
8307         xpsr_write(env, val, 0x0600fc00);
8308         break;
8309     case 8: /* MSP */
8310         if (env->v7m.current_sp)
8311             env->v7m.other_sp = val;
8312         else
8313             env->regs[13] = val;
8314         break;
8315     case 9: /* PSP */
8316         if (env->v7m.current_sp)
8317             env->regs[13] = val;
8318         else
8319             env->v7m.other_sp = val;
8320         break;
8321     case 16: /* PRIMASK */
8322         if (val & 1) {
8323             env->daif |= PSTATE_I;
8324         } else {
8325             env->daif &= ~PSTATE_I;
8326         }
8327         break;
8328     case 17: /* BASEPRI */
8329         env->v7m.basepri = val & 0xff;
8330         break;
8331     case 18: /* BASEPRI_MAX */
8332         val &= 0xff;
8333         if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
8334             env->v7m.basepri = val;
8335         break;
8336     case 19: /* FAULTMASK */
8337         if (val & 1) {
8338             env->daif |= PSTATE_F;
8339         } else {
8340             env->daif &= ~PSTATE_F;
8341         }
8342         break;
8343     case 20: /* CONTROL */
8344         env->v7m.control = val & 3;
8345         switch_v7m_sp(env, (val & 2) != 0);
8346         break;
8347     default:
8348         /* ??? For debugging only.  */
8349         cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
8350         return;
8351     }
8352 }
8353 
8354 #endif
8355 
8356 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
8357 {
8358     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
8359      * Note that we do not implement the (architecturally mandated)
8360      * alignment fault for attempts to use this on Device memory
8361      * (which matches the usual QEMU behaviour of not implementing either
8362      * alignment faults or any memory attribute handling).
8363      */
8364 
8365     ARMCPU *cpu = arm_env_get_cpu(env);
8366     uint64_t blocklen = 4 << cpu->dcz_blocksize;
8367     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
8368 
8369 #ifndef CONFIG_USER_ONLY
8370     {
8371         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
8372          * the block size so we might have to do more than one TLB lookup.
8373          * We know that in fact for any v8 CPU the page size is at least 4K
8374          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
8375          * 1K as an artefact of legacy v5 subpage support being present in the
8376          * same QEMU executable.
8377          */
8378         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
8379         void *hostaddr[maxidx];
8380         int try, i;
8381         unsigned mmu_idx = cpu_mmu_index(env, false);
8382         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
8383 
8384         for (try = 0; try < 2; try++) {
8385 
8386             for (i = 0; i < maxidx; i++) {
8387                 hostaddr[i] = tlb_vaddr_to_host(env,
8388                                                 vaddr + TARGET_PAGE_SIZE * i,
8389                                                 1, mmu_idx);
8390                 if (!hostaddr[i]) {
8391                     break;
8392                 }
8393             }
8394             if (i == maxidx) {
8395                 /* If it's all in the TLB it's fair game for just writing to;
8396                  * we know we don't need to update dirty status, etc.
8397                  */
8398                 for (i = 0; i < maxidx - 1; i++) {
8399                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
8400                 }
8401                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
8402                 return;
8403             }
8404             /* OK, try a store and see if we can populate the tlb. This
8405              * might cause an exception if the memory isn't writable,
8406              * in which case we will longjmp out of here. We must for
8407              * this purpose use the actual register value passed to us
8408              * so that we get the fault address right.
8409              */
8410             helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
8411             /* Now we can populate the other TLB entries, if any */
8412             for (i = 0; i < maxidx; i++) {
8413                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
8414                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
8415                     helper_ret_stb_mmu(env, va, 0, oi, GETPC());
8416                 }
8417             }
8418         }
8419 
8420         /* Slow path (probably attempt to do this to an I/O device or
8421          * similar, or clearing of a block of code we have translations
8422          * cached for). Just do a series of byte writes as the architecture
8423          * demands. It's not worth trying to use a cpu_physical_memory_map(),
8424          * memset(), unmap() sequence here because:
8425          *  + we'd need to account for the blocksize being larger than a page
8426          *  + the direct-RAM access case is almost always going to be dealt
8427          *    with in the fastpath code above, so there's no speed benefit
8428          *  + we would have to deal with the map returning NULL because the
8429          *    bounce buffer was in use
8430          */
8431         for (i = 0; i < blocklen; i++) {
8432             helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
8433         }
8434     }
8435 #else
8436     memset(g2h(vaddr), 0, blocklen);
8437 #endif
8438 }
8439 
8440 /* Note that signed overflow is undefined in C.  The following routines are
8441    careful to use unsigned types where modulo arithmetic is required.
8442    Failure to do so _will_ break on newer gcc.  */
8443 
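/* Purely illustrative (the 'example_' name is not used elsewhere): the same
 * overflow-detection idiom at 32 bits. All arithmetic and tests are done on
 * unsigned values, so no signed overflow is ever evaluated; overflow
 * happened iff the operands agree in sign and the result does not.
 */
static inline uint32_t example_add32_sat(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;

    if (((res ^ a) & 0x80000000u) && !((a ^ b) & 0x80000000u)) {
        res = (a & 0x80000000u) ? 0x80000000u : 0x7fffffffu;
    }
    return res;
}
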
8444 /* Signed saturating arithmetic.  */
8445 
8446 /* Perform 16-bit signed saturating addition.  */
8447 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
8448 {
8449     uint16_t res;
8450 
8451     res = a + b;
8452     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
8453         if (a & 0x8000)
8454             res = 0x8000;
8455         else
8456             res = 0x7fff;
8457     }
8458     return res;
8459 }
8460 
8461 /* Perform 8-bit signed saturating addition.  */
8462 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
8463 {
8464     uint8_t res;
8465 
8466     res = a + b;
8467     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
8468         if (a & 0x80)
8469             res = 0x80;
8470         else
8471             res = 0x7f;
8472     }
8473     return res;
8474 }
8475 
8476 /* Perform 16-bit signed saturating subtraction.  */
8477 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
8478 {
8479     uint16_t res;
8480 
8481     res = a - b;
8482     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
8483         if (a & 0x8000)
8484             res = 0x8000;
8485         else
8486             res = 0x7fff;
8487     }
8488     return res;
8489 }
8490 
8491 /* Perform 8-bit signed saturating subtraction.  */
8492 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
8493 {
8494     uint8_t res;
8495 
8496     res = a - b;
8497     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
8498         if (a & 0x80)
8499             res = 0x80;
8500         else
8501             res = 0x7f;
8502     }
8503     return res;
8504 }
8505 
8506 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
8507 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
8508 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
8509 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
8510 #define PFX q
8511 
8512 #include "op_addsub.h"
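/* Each inclusion of op_addsub.h is assumed to expand the ADD16/SUB16/ADD8/
 * SUB8 and PFX macros into packed lane-wise helpers, roughly along these
 * lines (an illustrative sketch only; the real header may differ):
 *
 *     uint32_t HELPER(glue(PFX, add16))(uint32_t a, uint32_t b)
 *     {
 *         uint32_t res = 0;
 *         ADD16(a, b, 0);
 *         ADD16(a >> 16, b >> 16, 1);
 *         return res;
 *     }
 *
 * with RESULT() ORing each lane's value into 'res' at that lane's bit
 * position, and the macros #undef'd again at the end of the header.
 */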
8513 
8514 /* Unsigned saturating arithmetic.  */
8515 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
8516 {
8517     uint16_t res;
8518     res = a + b;
8519     if (res < a)
8520         res = 0xffff;
8521     return res;
8522 }
8523 
8524 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
8525 {
8526     if (a > b)
8527         return a - b;
8528     else
8529         return 0;
8530 }
8531 
8532 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
8533 {
8534     uint8_t res;
8535     res = a + b;
8536     if (res < a)
8537         res = 0xff;
8538     return res;
8539 }
8540 
8541 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
8542 {
8543     if (a > b)
8544         return a - b;
8545     else
8546         return 0;
8547 }
8548 
8549 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
8550 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
8551 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
8552 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
8553 #define PFX uq
8554 
8555 #include "op_addsub.h"
8556 
8557 /* Signed modulo arithmetic; each 16-bit lane drives two GE bits, each 8-bit lane one.  */
8558 #define SARITH16(a, b, n, op) do { \
8559     int32_t sum; \
8560     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
8561     RESULT(sum, n, 16); \
8562     if (sum >= 0) \
8563         ge |= 3 << (n * 2); \
8564     } while(0)
8565 
8566 #define SARITH8(a, b, n, op) do { \
8567     int32_t sum; \
8568     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
8569     RESULT(sum, n, 8); \
8570     if (sum >= 0) \
8571         ge |= 1 << n; \
8572     } while(0)
8573 
8574 
8575 #define ADD16(a, b, n) SARITH16(a, b, n, +)
8576 #define SUB16(a, b, n) SARITH16(a, b, n, -)
8577 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
8578 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
8579 #define PFX s
8580 #define ARITH_GE
8581 
8582 #include "op_addsub.h"
8583 
8584 /* Unsigned modulo arithmetic; GE records carry-out on ADD, absence of borrow on SUB.  */
8585 #define ADD16(a, b, n) do { \
8586     uint32_t sum; \
8587     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
8588     RESULT(sum, n, 16); \
8589     if ((sum >> 16) == 1) \
8590         ge |= 3 << (n * 2); \
8591     } while(0)
8592 
8593 #define ADD8(a, b, n) do { \
8594     uint32_t sum; \
8595     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
8596     RESULT(sum, n, 8); \
8597     if ((sum >> 8) == 1) \
8598         ge |= 1 << n; \
8599     } while(0)
8600 
8601 #define SUB16(a, b, n) do { \
8602     uint32_t sum; \
8603     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
8604     RESULT(sum, n, 16); \
8605     if ((sum >> 16) == 0) \
8606         ge |= 3 << (n * 2); \
8607     } while(0)
8608 
8609 #define SUB8(a, b, n) do { \
8610     uint32_t sum; \
8611     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
8612     RESULT(sum, n, 8); \
8613     if ((sum >> 8) == 0) \
8614         ge |= 1 << n; \
8615     } while(0)
8616 
8617 #define PFX u
8618 #define ARITH_GE
8619 
8620 #include "op_addsub.h"
8621 
8622 /* Halved signed arithmetic.  */
8623 #define ADD16(a, b, n) \
8624   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
8625 #define SUB16(a, b, n) \
8626   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
8627 #define ADD8(a, b, n) \
8628   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
8629 #define SUB8(a, b, n) \
8630   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
8631 #define PFX sh
8632 
8633 #include "op_addsub.h"
8634 
8635 /* Halved unsigned arithmetic.  */
8636 #define ADD16(a, b, n) \
8637   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8638 #define SUB16(a, b, n) \
8639   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8640 #define ADD8(a, b, n) \
8641   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8642 #define SUB8(a, b, n) \
8643   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8644 #define PFX uh
8645 
8646 #include "op_addsub.h"
8647 
8648 static inline uint8_t do_usad(uint8_t a, uint8_t b)
8649 {
8650     if (a > b)
8651         return a - b;
8652     else
8653         return b - a;
8654 }
8655 
8656 /* Unsigned sum of absolute byte differences, e.g. usad8(0x01020304, 0x02020202) == 4.  */
8657 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
8658 {
8659     uint32_t sum;
8660     sum = do_usad(a, b);
8661     sum += do_usad(a >> 8, b >> 8);
8662     sum += do_usad(a >> 16, b >> 16);
8663     sum += do_usad(a >> 24, b >> 24);
8664     return sum;
8665 }
8666 
8667 /* For ARMv6 SEL instruction.  */
8668 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
8669 {
8670     uint32_t mask;
8671 
8672     mask = 0;
8673     if (flags & 1)
8674         mask |= 0xff;
8675     if (flags & 2)
8676         mask |= 0xff00;
8677     if (flags & 4)
8678         mask |= 0xff0000;
8679     if (flags & 8)
8680         mask |= 0xff000000;
8681     return (a & mask) | (b & ~mask);
8682 }
8683 
8684 /* VFP support.  We follow the convention used for VFP instructions:
8685    Single precision routines have a "s" suffix, double precision a
8686    "d" suffix.  */
8687 
8688 /* Convert host exception flags to vfp form.  */
8689 static inline int vfp_exceptbits_from_host(int host_bits)
8690 {
8691     int target_bits = 0;
8692 
8693     if (host_bits & float_flag_invalid)
8694         target_bits |= 1;
8695     if (host_bits & float_flag_divbyzero)
8696         target_bits |= 2;
8697     if (host_bits & float_flag_overflow)
8698         target_bits |= 4;
8699     if (host_bits & (float_flag_underflow | float_flag_output_denormal))
8700         target_bits |= 8;
8701     if (host_bits & float_flag_inexact)
8702         target_bits |= 0x10;
8703     if (host_bits & float_flag_input_denormal)
8704         target_bits |= 0x80;
8705     return target_bits;
8706 }
8707 
8708 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
8709 {
8710     int i;
8711     uint32_t fpscr;
8712 
8713     fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) /* all but LEN/STRIDE */
8714             | (env->vfp.vec_len << 16)
8715             | (env->vfp.vec_stride << 20);
8716     i = get_float_exception_flags(&env->vfp.fp_status);
8717     i |= get_float_exception_flags(&env->vfp.standard_fp_status);
8718     fpscr |= vfp_exceptbits_from_host(i);
8719     return fpscr;
8720 }
8721 
8722 uint32_t vfp_get_fpscr(CPUARMState *env)
8723 {
8724     return HELPER(vfp_get_fpscr)(env);
8725 }
8726 
8727 /* Convert vfp exception flags to target form.  */
8728 static inline int vfp_exceptbits_to_host(int target_bits)
8729 {
8730     int host_bits = 0;
8731 
8732     if (target_bits & 1)
8733         host_bits |= float_flag_invalid;
8734     if (target_bits & 2)
8735         host_bits |= float_flag_divbyzero;
8736     if (target_bits & 4)
8737         host_bits |= float_flag_overflow;
8738     if (target_bits & 8)
8739         host_bits |= float_flag_underflow;
8740     if (target_bits & 0x10)
8741         host_bits |= float_flag_inexact;
8742     if (target_bits & 0x80)
8743         host_bits |= float_flag_input_denormal;
8744     return host_bits;
8745 }
8746 
8747 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
8748 {
8749     int i;
8750     uint32_t changed;
8751 
8752     changed = env->vfp.xregs[ARM_VFP_FPSCR];
8753     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); /* LEN/STRIDE cached below */
8754     env->vfp.vec_len = (val >> 16) & 7;
8755     env->vfp.vec_stride = (val >> 20) & 3;
8756 
8757     changed ^= val;
8758     if (changed & (3 << 22)) {
8759         i = (val >> 22) & 3;
8760         switch (i) {
8761         case FPROUNDING_TIEEVEN:
8762             i = float_round_nearest_even;
8763             break;
8764         case FPROUNDING_POSINF:
8765             i = float_round_up;
8766             break;
8767         case FPROUNDING_NEGINF:
8768             i = float_round_down;
8769             break;
8770         case FPROUNDING_ZERO:
8771             i = float_round_to_zero;
8772             break;
8773         }
8774         set_float_rounding_mode(i, &env->vfp.fp_status);
8775     }
8776     if (changed & (1 << 24)) {
8777         set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8778         set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8779     }
8780     if (changed & (1 << 25))
8781         set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
8782 
8783     i = vfp_exceptbits_to_host(val);
8784     set_float_exception_flags(i, &env->vfp.fp_status);
8785     set_float_exception_flags(0, &env->vfp.standard_fp_status);
8786 }
8787 
8788 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
8789 {
8790     HELPER(vfp_set_fpscr)(env, val);
8791 }
8792 
8793 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
8794 
8795 #define VFP_BINOP(name) \
8796 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
8797 { \
8798     float_status *fpst = fpstp; \
8799     return float32_ ## name(a, b, fpst); \
8800 } \
8801 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
8802 { \
8803     float_status *fpst = fpstp; \
8804     return float64_ ## name(a, b, fpst); \
8805 }
8806 VFP_BINOP(add)
8807 VFP_BINOP(sub)
8808 VFP_BINOP(mul)
8809 VFP_BINOP(div)
8810 VFP_BINOP(min)
8811 VFP_BINOP(max)
8812 VFP_BINOP(minnum)
8813 VFP_BINOP(maxnum)
8814 #undef VFP_BINOP
8815 
8816 float32 VFP_HELPER(neg, s)(float32 a)
8817 {
8818     return float32_chs(a);
8819 }
8820 
8821 float64 VFP_HELPER(neg, d)(float64 a)
8822 {
8823     return float64_chs(a);
8824 }
8825 
8826 float32 VFP_HELPER(abs, s)(float32 a)
8827 {
8828     return float32_abs(a);
8829 }
8830 
8831 float64 VFP_HELPER(abs, d)(float64 a)
8832 {
8833     return float64_abs(a);
8834 }
8835 
8836 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
8837 {
8838     return float32_sqrt(a, &env->vfp.fp_status);
8839 }
8840 
8841 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
8842 {
8843     return float64_sqrt(a, &env->vfp.fp_status);
8844 }
8845 
8846 /* XXX: check quiet/signaling case. flags hold NZCV: EQ 0110, LT 1000, GT 0010, unordered 0011.  */
8847 #define DO_VFP_cmp(p, type) \
8848 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
8849 { \
8850     uint32_t flags; \
8851     switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
8852     case 0: flags = 0x6; break; \
8853     case -1: flags = 0x8; break; \
8854     case 1: flags = 0x2; break; \
8855     default: case 2: flags = 0x3; break; \
8856     } \
8857     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
8858         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
8859 } \
8860 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
8861 { \
8862     uint32_t flags; \
8863     switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
8864     case 0: flags = 0x6; break; \
8865     case -1: flags = 0x8; break; \
8866     case 1: flags = 0x2; break; \
8867     default: case 2: flags = 0x3; break; \
8868     } \
8869     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
8870         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
8871 }
8872 DO_VFP_cmp(s, float32)
8873 DO_VFP_cmp(d, float64)
8874 #undef DO_VFP_cmp
8875 
8876 /* Integer to float and float to integer conversions */
8877 
8878 #define CONV_ITOF(name, fsz, sign) \
8879     float##fsz HELPER(name)(uint32_t x, void *fpstp) \
8880 { \
8881     float_status *fpst = fpstp; \
8882     return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
8883 }
8884 
8885 #define CONV_FTOI(name, fsz, sign, round) \
8886 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
8887 { \
8888     float_status *fpst = fpstp; \
8889     if (float##fsz##_is_any_nan(x)) { \
8890         float_raise(float_flag_invalid, fpst); \
8891         return 0; \
8892     } \
8893     return float##fsz##_to_##sign##int32##round(x, fpst); \
8894 }
8895 
8896 #define FLOAT_CONVS(name, p, fsz, sign) \
8897 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
8898 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
8899 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
8900 
8901 FLOAT_CONVS(si, s, 32, )
8902 FLOAT_CONVS(si, d, 64, )
8903 FLOAT_CONVS(ui, s, 32, u)
8904 FLOAT_CONVS(ui, d, 64, u)
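
/* Via token pasting the instantiations above generate helpers such as
 * vfp_sitos, vfp_tosis and vfp_tosizs (plus the d/ui variants):
 * int-to-float, float-to-int rounding per FPSCR, and float-to-int
 * round-to-zero respectively.
 */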
8905 
8906 #undef CONV_ITOF
8907 #undef CONV_FTOI
8908 #undef FLOAT_CONVS
8909 
8910 /* floating point conversion */
8911 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
8912 {
8913     float64 r = float32_to_float64(x, &env->vfp.fp_status);
8914     /* ARM requires that S<->D conversion of any kind of NaN generates
8915      * a quiet NaN by forcing the most significant frac bit to 1.
8916      */
8917     return float64_maybe_silence_nan(r, &env->vfp.fp_status);
8918 }
8919 
8920 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
8921 {
8922     float32 r = float64_to_float32(x, &env->vfp.fp_status);
8923     /* ARM requires that S<->D conversion of any kind of NaN generates
8924      * a quiet NaN by forcing the most significant frac bit to 1.
8925      */
8926     return float32_maybe_silence_nan(r, &env->vfp.fp_status);
8927 }
8928 
8929 /* VFP3 fixed point conversion.  */
8930 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
8931 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
8932                                      void *fpstp) \
8933 { \
8934     float_status *fpst = fpstp; \
8935     float##fsz tmp; \
8936     tmp = itype##_to_##float##fsz(x, fpst); \
8937     return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
8938 }
8939 
8940 /* Notice that we want only input-denormal exception flags from the
8941  * scalbn operation: the other possible flags (overflow+inexact if
8942  * we overflow to infinity, output-denormal) aren't correct for the
8943  * complete scale-and-convert operation.
8944  */
8945 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
8946 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
8947                                              uint32_t shift, \
8948                                              void *fpstp) \
8949 { \
8950     float_status *fpst = fpstp; \
8951     int old_exc_flags = get_float_exception_flags(fpst); \
8952     float##fsz tmp; \
8953     if (float##fsz##_is_any_nan(x)) { \
8954         float_raise(float_flag_invalid, fpst); \
8955         return 0; \
8956     } \
8957     tmp = float##fsz##_scalbn(x, shift, fpst); \
8958     old_exc_flags |= get_float_exception_flags(fpst) \
8959         & float_flag_input_denormal; \
8960     set_float_exception_flags(old_exc_flags, fpst); \
8961     return float##fsz##_to_##itype##round(tmp, fpst); \
8962 }
8963 
8964 #define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
8965 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
8966 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
8967 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
8968 
8969 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
8970 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
8971 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
8972 
8973 VFP_CONV_FIX(sh, d, 64, 64, int16)
8974 VFP_CONV_FIX(sl, d, 64, 64, int32)
8975 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
8976 VFP_CONV_FIX(uh, d, 64, 64, uint16)
8977 VFP_CONV_FIX(ul, d, 64, 64, uint32)
8978 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
8979 VFP_CONV_FIX(sh, s, 32, 32, int16)
8980 VFP_CONV_FIX(sl, s, 32, 32, int32)
8981 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
8982 VFP_CONV_FIX(uh, s, 32, 32, uint16)
8983 VFP_CONV_FIX(ul, s, 32, 32, uint32)
8984 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
8985 #undef VFP_CONV_FIX
8986 #undef VFP_CONV_FIX_FLOAT
8987 #undef VFP_CONV_FLOAT_FIX_ROUND
8988 
8989 /* Set the current fp rounding mode and return the old one.
8990  * The argument is a softfloat float_round_ value.
8991  */
8992 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
8993 {
8994     float_status *fp_status = &env->vfp.fp_status;
8995 
8996     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
8997     set_float_rounding_mode(rmode, fp_status);
8998 
8999     return prev_rmode;
9000 }
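
/* An illustrative save/restore pattern around a directed-rounding
 * operation (the generated code makes the equivalent helper calls):
 *
 *     uint32_t old = helper_set_rmode(float_round_up, env);
 *     ... perform the round-towards-plus-infinity operation ...
 *     helper_set_rmode(old, env);
 */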
9001 
9002 /* Set the current fp rounding mode in the standard fp status and return
9003  * the old one. This is for NEON instructions that need to change the
9004  * rounding mode but wish to use the standard FPSCR values for everything
9005  * else. Always set the rounding mode back to the correct value after
9006  * modifying it.
9007  * The argument is a softfloat float_round_ value.
9008  */
9009 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
9010 {
9011     float_status *fp_status = &env->vfp.standard_fp_status;
9012 
9013     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
9014     set_float_rounding_mode(rmode, fp_status);
9015 
9016     return prev_rmode;
9017 }
9018 
9019 /* Half precision conversions; FPSCR bit 26 (AHP) clear selects IEEE format.  */
9020 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
9021 {
9022     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9023     float32 r = float16_to_float32(make_float16(a), ieee, s);
9024     if (ieee) {
9025         return float32_maybe_silence_nan(r, s);
9026     }
9027     return r;
9028 }
9029 
9030 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
9031 {
9032     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9033     float16 r = float32_to_float16(a, ieee, s);
9034     if (ieee) {
9035         r = float16_maybe_silence_nan(r, s);
9036     }
9037     return float16_val(r);
9038 }
9039 
9040 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9041 {
9042     return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
9043 }
9044 
9045 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9046 {
9047     return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
9048 }
9049 
9050 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9051 {
9052     return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
9053 }
9054 
9055 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9056 {
9057     return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
9058 }
9059 
9060 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
9061 {
9062     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9063     float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
9064     if (ieee) {
9065         return float64_maybe_silence_nan(r, &env->vfp.fp_status);
9066     }
9067     return r;
9068 }
9069 
9070 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
9071 {
9072     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9073     float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
9074     if (ieee) {
9075         r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
9076     }
9077     return float16_val(r);
9078 }
9079 
9080 #define float32_two make_float32(0x40000000)
9081 #define float32_three make_float32(0x40400000)
9082 #define float32_one_point_five make_float32(0x3fc00000)
9083 
9084 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
9085 {
9086     float_status *s = &env->vfp.standard_fp_status;
9087     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9088         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9089         if (!(float32_is_zero(a) || float32_is_zero(b))) {
9090             float_raise(float_flag_input_denormal, s);
9091         }
9092         return float32_two;
9093     }
9094     return float32_sub(float32_two, float32_mul(a, b, s), s);
9095 }
9096 
9097 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
9098 {
9099     float_status *s = &env->vfp.standard_fp_status;
9100     float32 product;
9101     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9102         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9103         if (!(float32_is_zero(a) || float32_is_zero(b))) {
9104             float_raise(float_flag_input_denormal, s);
9105         }
9106         return float32_one_point_five;
9107     }
9108     product = float32_mul(a, b, s);
9109     return float32_div(float32_sub(float32_three, product, s), float32_two, s);
9110 }
9111 
9112 /* NEON helpers.  */
9113 
9114 /* Constants 256 and 512 are used in some helpers; we avoid relying on
9115  * int->float conversions at run-time.  */
9116 #define float64_256 make_float64(0x4070000000000000LL)
9117 #define float64_512 make_float64(0x4080000000000000LL)
9118 #define float32_maxnorm make_float32(0x7f7fffff)
9119 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
9120 
9121 /* Reciprocal functions
9122  *
9123  * The algorithm that must be used to calculate the estimate
9124  * is specified by the ARM ARM, see FPRecipEstimate()
9125  */
9126 
9127 static float64 recip_estimate(float64 a, float_status *real_fp_status)
9128 {
9129     /* These calculations mustn't set any fp exception flags,
9130      * so we use a local copy of the fp_status.
9131      */
9132     float_status dummy_status = *real_fp_status;
9133     float_status *s = &dummy_status;
9134     /* q = (int)(a * 512.0) */
9135     float64 q = float64_mul(float64_512, a, s);
9136     int64_t q_int = float64_to_int64_round_to_zero(q, s);
9137 
9138     /* r = 1.0 / (((double)q + 0.5) / 512.0) */
9139     q = int64_to_float64(q_int, s);
9140     q = float64_add(q, float64_half, s);
9141     q = float64_div(q, float64_512, s);
9142     q = float64_div(float64_one, q, s);
9143 
9144     /* s = (int)(256.0 * r + 0.5) */
9145     q = float64_mul(q, float64_256, s);
9146     q = float64_add(q, float64_half, s);
9147     q_int = float64_to_int64_round_to_zero(q, s);
9148 
9149     /* return (double)s / 256.0 */
9150     return float64_div(int64_to_float64(q_int, s), float64_256, s);
9151 }
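
/* Worked example: a = 0.5 gives q_int = 256, r = 1 / (256.5 / 512)
 * ~= 1.99610, s = (int)(256 * 1.99610 + 0.5) = 511, and the returned
 * estimate is 511 / 256 = 1.99609375.
 */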
9152 
9153 /* Common wrapper to call recip_estimate */
9154 static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
9155 {
9156     uint64_t val64 = float64_val(num);
9157     uint64_t frac = extract64(val64, 0, 52);
9158     int64_t exp = extract64(val64, 52, 11);
9159     uint64_t sbit;
9160     float64 scaled, estimate;
9161 
9162     /* Generate the scaled number for the estimate function */
9163     if (exp == 0) {
9164         if (extract64(frac, 51, 1) == 0) {
9165             exp = -1;
9166             frac = extract64(frac, 0, 50) << 2;
9167         } else {
9168             frac = extract64(frac, 0, 51) << 1;
9169         }
9170     }
9171 
9172     /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
9173     scaled = make_float64((0x3feULL << 52)
9174                           | extract64(frac, 44, 8) << 44);
9175 
9176     estimate = recip_estimate(scaled, fpst);
9177 
9178     /* Build new result */
9179     val64 = float64_val(estimate);
9180     sbit = 0x8000000000000000ULL & val64;
9181     exp = off - exp;
9182     frac = extract64(val64, 0, 52);
9183 
9184     if (exp == 0) {
9185         frac = 1ULL << 51 | extract64(frac, 1, 51);
9186     } else if (exp == -1) {
9187         frac = 1ULL << 50 | extract64(frac, 2, 50);
9188         exp = 0;
9189     }
9190 
9191     return make_float64(sbit | (exp << 52) | frac);
9192 }
9193 
9194 static bool round_to_inf(float_status *fpst, bool sign_bit)
9195 {
9196     switch (fpst->float_rounding_mode) {
9197     case float_round_nearest_even: /* Round to Nearest */
9198         return true;
9199     case float_round_up: /* Round to +Inf */
9200         return !sign_bit;
9201     case float_round_down: /* Round to -Inf */
9202         return sign_bit;
9203     case float_round_to_zero: /* Round to Zero */
9204         return false;
9205     }
9206 
9207     g_assert_not_reached();
9208 }
9209 
9210 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
9211 {
9212     float_status *fpst = fpstp;
9213     float32 f32 = float32_squash_input_denormal(input, fpst);
9214     uint32_t f32_val = float32_val(f32);
9215     uint32_t f32_sbit = 0x80000000ULL & f32_val;
9216     int32_t f32_exp = extract32(f32_val, 23, 8);
9217     uint32_t f32_frac = extract32(f32_val, 0, 23);
9218     float64 f64, r64;
9219     uint64_t r64_val;
9220     int64_t r64_exp;
9221     uint64_t r64_frac;
9222 
9223     if (float32_is_any_nan(f32)) {
9224         float32 nan = f32;
9225         if (float32_is_signaling_nan(f32, fpst)) {
9226             float_raise(float_flag_invalid, fpst);
9227             nan = float32_maybe_silence_nan(f32, fpst);
9228         }
9229         if (fpst->default_nan_mode) {
9230             nan = float32_default_nan(fpst);
9231         }
9232         return nan;
9233     } else if (float32_is_infinity(f32)) {
9234         return float32_set_sign(float32_zero, float32_is_neg(f32));
9235     } else if (float32_is_zero(f32)) {
9236         float_raise(float_flag_divbyzero, fpst);
9237         return float32_set_sign(float32_infinity, float32_is_neg(f32));
9238     } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
9239         /* Abs(value) < 2.0^-128 */
9240         float_raise(float_flag_overflow | float_flag_inexact, fpst);
9241         if (round_to_inf(fpst, f32_sbit)) {
9242             return float32_set_sign(float32_infinity, float32_is_neg(f32));
9243         } else {
9244             return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
9245         }
9246     } else if (f32_exp >= 253 && fpst->flush_to_zero) {
9247         float_raise(float_flag_underflow, fpst);
9248         return float32_set_sign(float32_zero, float32_is_neg(f32));
9249     }
9250 
9251 
9252     f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
9253     r64 = call_recip_estimate(f64, 253, fpst);
9254     r64_val = float64_val(r64);
9255     r64_exp = extract64(r64_val, 52, 11);
9256     r64_frac = extract64(r64_val, 0, 52);
9257 
9258     /* result = sign : result_exp<7:0> : fraction<51:29>; */
9259     return make_float32(f32_sbit |
9260                         (r64_exp & 0xff) << 23 |
9261                         extract64(r64_frac, 29, 24));
9262 }
9263 
9264 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
9265 {
9266     float_status *fpst = fpstp;
9267     float64 f64 = float64_squash_input_denormal(input, fpst);
9268     uint64_t f64_val = float64_val(f64);
9269     uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
9270     int64_t f64_exp = extract64(f64_val, 52, 11);
9271     float64 r64;
9272     uint64_t r64_val;
9273     int64_t r64_exp;
9274     uint64_t r64_frac;
9275 
9276     /* Deal with any special cases */
9277     if (float64_is_any_nan(f64)) {
9278         float64 nan = f64;
9279         if (float64_is_signaling_nan(f64, fpst)) {
9280             float_raise(float_flag_invalid, fpst);
9281             nan = float64_maybe_silence_nan(f64, fpst);
9282         }
9283         if (fpst->default_nan_mode) {
9284             nan = float64_default_nan(fpst);
9285         }
9286         return nan;
9287     } else if (float64_is_infinity(f64)) {
9288         return float64_set_sign(float64_zero, float64_is_neg(f64));
9289     } else if (float64_is_zero(f64)) {
9290         float_raise(float_flag_divbyzero, fpst);
9291         return float64_set_sign(float64_infinity, float64_is_neg(f64));
9292     } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
9293         /* Abs(value) < 2.0^-1024 */
9294         float_raise(float_flag_overflow | float_flag_inexact, fpst);
9295         if (round_to_inf(fpst, f64_sbit)) {
9296             return float64_set_sign(float64_infinity, float64_is_neg(f64));
9297         } else {
9298             return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
9299         }
9300     } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
9301         float_raise(float_flag_underflow, fpst);
9302         return float64_set_sign(float64_zero, float64_is_neg(f64));
9303     }
9304 
9305     r64 = call_recip_estimate(f64, 2045, fpst);
9306     r64_val = float64_val(r64);
9307     r64_exp = extract64(r64_val, 52, 11);
9308     r64_frac = extract64(r64_val, 0, 52);
9309 
9310     /* result = sign : result_exp<10:0> : fraction<51:0> */
9311     return make_float64(f64_sbit |
9312                         ((r64_exp & 0x7ff) << 52) |
9313                         r64_frac);
9314 }
9315 
9316 /* The algorithm that must be used to calculate the estimate
9317  * is specified by the ARM ARM.
9318  */
9319 static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
9320 {
9321     /* These calculations mustn't set any fp exception flags,
9322      * so we use a local copy of the fp_status.
9323      */
9324     float_status dummy_status = *real_fp_status;
9325     float_status *s = &dummy_status;
9326     float64 q;
9327     int64_t q_int;
9328 
9329     if (float64_lt(a, float64_half, s)) {
9330         /* range 0.25 <= a < 0.5 */
9331 
9332         /* a in units of 1/512 rounded down */
9333         /* q0 = (int)(a * 512.0);  */
9334         q = float64_mul(float64_512, a, s);
9335         q_int = float64_to_int64_round_to_zero(q, s);
9336 
9337         /* reciprocal root r */
9338         /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
9339         q = int64_to_float64(q_int, s);
9340         q = float64_add(q, float64_half, s);
9341         q = float64_div(q, float64_512, s);
9342         q = float64_sqrt(q, s);
9343         q = float64_div(float64_one, q, s);
9344     } else {
9345         /* range 0.5 <= a < 1.0 */
9346 
9347         /* a in units of 1/256 rounded down */
9348         /* q1 = (int)(a * 256.0); */
9349         q = float64_mul(float64_256, a, s);
9350         q_int = float64_to_int64_round_to_zero(q, s);
9351 
9352         /* reciprocal root r */
9353         /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
9354         q = int64_to_float64(q_int, s);
9355         q = float64_add(q, float64_half, s);
9356         q = float64_div(q, float64_256, s);
9357         q = float64_sqrt(q, s);
9358         q = float64_div(float64_one, q, s);
9359     }
9360     /* r in units of 1/256 rounded to nearest */
9361     /* s = (int)(256.0 * r + 0.5); */
9362 
9363     q = float64_mul(q, float64_256, s);
9364     q = float64_add(q, float64_half, s);
9365     q_int = float64_to_int64_round_to_zero(q, s);
9366 
9367     /* return (double)s / 256.0;*/
9368     return float64_div(int64_to_float64(q_int, s), float64_256, s);
9369 }
9370 
9371 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
9372 {
9373     float_status *s = fpstp;
9374     float32 f32 = float32_squash_input_denormal(input, s);
9375     uint32_t val = float32_val(f32);
9376     uint32_t f32_sbit = 0x80000000 & val;
9377     int32_t f32_exp = extract32(val, 23, 8);
9378     uint32_t f32_frac = extract32(val, 0, 23);
9379     uint64_t f64_frac;
9380     uint64_t val64;
9381     int result_exp;
9382     float64 f64;
9383 
9384     if (float32_is_any_nan(f32)) {
9385         float32 nan = f32;
9386         if (float32_is_signaling_nan(f32, s)) {
9387             float_raise(float_flag_invalid, s);
9388             nan = float32_maybe_silence_nan(f32, s);
9389         }
9390         if (s->default_nan_mode) {
9391             nan = float32_default_nan(s);
9392         }
9393         return nan;
9394     } else if (float32_is_zero(f32)) {
9395         float_raise(float_flag_divbyzero, s);
9396         return float32_set_sign(float32_infinity, float32_is_neg(f32));
9397     } else if (float32_is_neg(f32)) {
9398         float_raise(float_flag_invalid, s);
9399         return float32_default_nan(s);
9400     } else if (float32_is_infinity(f32)) {
9401         return float32_zero;
9402     }
9403 
9404     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9405      * preserving the parity of the exponent.  */
9406 
9407     f64_frac = ((uint64_t) f32_frac) << 29;
9408     if (f32_exp == 0) {
9409         while (extract64(f64_frac, 51, 1) == 0) {
9410             f64_frac = f64_frac << 1;
9411             f32_exp = f32_exp - 1;
9412         }
9413         f64_frac = extract64(f64_frac, 0, 51) << 1;
9414     }
9415 
9416     if (extract64(f32_exp, 0, 1) == 0) {
9417         f64 = make_float64(((uint64_t) f32_sbit) << 32
9418                            | (0x3feULL << 52)
9419                            | f64_frac);
9420     } else {
9421         f64 = make_float64(((uint64_t) f32_sbit) << 32
9422                            | (0x3fdULL << 52)
9423                            | f64_frac);
9424     }
9425 
9426     result_exp = (380 - f32_exp) / 2;
9427 
9428     f64 = recip_sqrt_estimate(f64, s);
9429 
9430     val64 = float64_val(f64);
9431 
9432     val = ((result_exp & 0xff) << 23)
9433         | ((val64 >> 29) & 0x7fffff);
9434     return make_float32(val);
9435 }
9436 
9437 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
9438 {
9439     float_status *s = fpstp;
9440     float64 f64 = float64_squash_input_denormal(input, s);
9441     uint64_t val = float64_val(f64);
9442     uint64_t f64_sbit = 0x8000000000000000ULL & val;
9443     int64_t f64_exp = extract64(val, 52, 11);
9444     uint64_t f64_frac = extract64(val, 0, 52);
9445     int64_t result_exp;
9446     uint64_t result_frac;
9447 
9448     if (float64_is_any_nan(f64)) {
9449         float64 nan = f64;
9450         if (float64_is_signaling_nan(f64, s)) {
9451             float_raise(float_flag_invalid, s);
9452             nan = float64_maybe_silence_nan(f64, s);
9453         }
9454         if (s->default_nan_mode) {
9455             nan = float64_default_nan(s);
9456         }
9457         return nan;
9458     } else if (float64_is_zero(f64)) {
9459         float_raise(float_flag_divbyzero, s);
9460         return float64_set_sign(float64_infinity, float64_is_neg(f64));
9461     } else if (float64_is_neg(f64)) {
9462         float_raise(float_flag_invalid, s);
9463         return float64_default_nan(s);
9464     } else if (float64_is_infinity(f64)) {
9465         return float64_zero;
9466     }
9467 
9468     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9469      * preserving the parity of the exponent.  */
9470 
9471     if (f64_exp == 0) {
9472         while (extract64(f64_frac, 51, 1) == 0) {
9473             f64_frac = f64_frac << 1;
9474             f64_exp = f64_exp - 1;
9475         }
9476         f64_frac = extract64(f64_frac, 0, 51) << 1;
9477     }
9478 
9479     if (extract64(f64_exp, 0, 1) == 0) {
9480         f64 = make_float64(f64_sbit
9481                            | (0x3feULL << 52)
9482                            | f64_frac);
9483     } else {
9484         f64 = make_float64(f64_sbit
9485                            | (0x3fdULL << 52)
9486                            | f64_frac);
9487     }
9488 
9489     result_exp = (3068 - f64_exp) / 2;
9490 
9491     f64 = recip_sqrt_estimate(f64, s);
9492 
9493     result_frac = extract64(float64_val(f64), 0, 52);
9494 
9495     return make_float64(f64_sbit |
9496                         ((result_exp & 0x7ff) << 52) |
9497                         result_frac);
9498 }
9499 
9500 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
9501 {
9502     float_status *s = fpstp;
9503     float64 f64;
9504 
9505     if ((a & 0x80000000) == 0) {
9506         return 0xffffffff;
9507     }
9508 
9509     f64 = make_float64((0x3feULL << 52)
9510                        | ((int64_t)(a & 0x7fffffff) << 21));
9511 
9512     f64 = recip_estimate(f64, s);
9513 
9514     return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9515 }
9516 
9517 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
9518 {
9519     float_status *fpst = fpstp;
9520     float64 f64;
9521 
9522     if ((a & 0xc0000000) == 0) {
9523         return 0xffffffff;
9524     }
9525 
9526     if (a & 0x80000000) {
9527         f64 = make_float64((0x3feULL << 52)
9528                            | ((uint64_t)(a & 0x7fffffff) << 21));
9529     } else { /* bits 31-30 == '01' */
9530         f64 = make_float64((0x3fdULL << 52)
9531                            | ((uint64_t)(a & 0x3fffffff) << 22));
9532     }
9533 
9534     f64 = recip_sqrt_estimate(f64, fpst);
9535 
9536     return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9537 }
9538 
9539 /* VFPv4 fused multiply-accumulate */
9540 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
9541 {
9542     float_status *fpst = fpstp;
9543     return float32_muladd(a, b, c, 0, fpst);
9544 }
9545 
9546 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
9547 {
9548     float_status *fpst = fpstp;
9549     return float64_muladd(a, b, c, 0, fpst);
9550 }
9551 
9552 /* ARMv8 round to integral */
9553 float32 HELPER(rints_exact)(float32 x, void *fp_status)
9554 {
9555     return float32_round_to_int(x, fp_status);
9556 }
9557 
9558 float64 HELPER(rintd_exact)(float64 x, void *fp_status)
9559 {
9560     return float64_round_to_int(x, fp_status);
9561 }
9562 
9563 float32 HELPER(rints)(float32 x, void *fp_status)
9564 {
9565     int old_flags = get_float_exception_flags(fp_status), new_flags;
9566     float32 ret;
9567 
9568     ret = float32_round_to_int(x, fp_status);
9569 
9570     /* Suppress any inexact exceptions the conversion produced */
9571     if (!(old_flags & float_flag_inexact)) {
9572         new_flags = get_float_exception_flags(fp_status);
9573         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9574     }
9575 
9576     return ret;
9577 }
9578 
9579 float64 HELPER(rintd)(float64 x, void *fp_status)
9580 {
9581     int old_flags = get_float_exception_flags(fp_status), new_flags;
9582     float64 ret;
9583 
9584     ret = float64_round_to_int(x, fp_status);
9585 
9588     /* Suppress any inexact exceptions the conversion produced */
9589     if (!(old_flags & float_flag_inexact)) {
9590         new_flags = get_float_exception_flags(fp_status);
9591         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9592     }
9593 
9594     return ret;
9595 }
9596 
9597 /* Convert ARM rounding mode to softfloat */
9598 int arm_rmode_to_sf(int rmode)
9599 {
9600     switch (rmode) {
9601     case FPROUNDING_TIEAWAY:
9602         rmode = float_round_ties_away;
9603         break;
9604     case FPROUNDING_ODD:
9605         /* FIXME: add support for FPROUNDING_ODD (TIEAWAY is handled above) */
9606         qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
9607                       rmode);
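        /* fall through */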
9608     case FPROUNDING_TIEEVEN:
9609     default:
9610         rmode = float_round_nearest_even;
9611         break;
9612     case FPROUNDING_POSINF:
9613         rmode = float_round_up;
9614         break;
9615     case FPROUNDING_NEGINF:
9616         rmode = float_round_down;
9617         break;
9618     case FPROUNDING_ZERO:
9619         rmode = float_round_to_zero;
9620         break;
9621     }
9622     return rmode;
9623 }
9624 
9625 /* CRC helpers.
9626  * The upper bytes of val (above the number specified by 'bytes') must have
9627  * been zeroed out by the caller.
9628  */
9629 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
9630 {
9631     uint8_t buf[4];
9632 
9633     stl_le_p(buf, val);
9634 
9635     /* zlib crc32 converts the accumulator and output to one's complement.  */
9636     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
9637 }
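
/* Illustrative only (the 'example_' name is not part of this file): chaining
 * the helper word-at-a-time, as a sequence of CRC32W instructions would,
 * assuming 'len' is a multiple of 4.
 */
static inline uint32_t example_crc32_words(uint32_t acc, const uint8_t *buf,
                                           size_t len)
{
    size_t i;

    for (i = 0; i < len; i += 4) {
        acc = helper_crc32(acc, ldl_le_p(buf + i), 4);
    }
    return acc;
}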
9638 
9639 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
9640 {
9641     uint8_t buf[4];
9642 
9643     stl_le_p(buf, val);
9644 
9645     /* Linux crc32c converts the output to one's complement.  */
9646     return crc32c(acc, buf, bytes) ^ 0xffffffff;
9647 }
9648