xref: /qemu/target/arm/helper.c (revision 658178c3)
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

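/*
 * raw_read() and raw_write() access the backing storage for a register
 * directly via ri->fieldoffset, with no side effects. They are the
 * building blocks for the "raw" accesses used by the migration and KVM
 * synchronization code below.
 */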
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

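/*
 * Building the cpreg list is a two-pass walk over the sorted hash keys:
 * count_cpreg() sizes the arrays, then add_cpreg_to_list() fills in the
 * indexes. Both passes skip NO_RAW and ALIAS registers, so the counts
 * always agree.
 */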
void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

static bool arm_pan_enabled(CPUARMState *env)
{
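    /* Note that PSTATE.PAN behaves as 0 when HCR_EL2.{NV,NV1} == {1,1} */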
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
#endif

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
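    /* Bits [27:0] of the written value hold IPA[39:12] of the address */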
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        /* Mask out the CP11/CP10 fields so they read as 0b00 */
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * usermode there is no virtual clock to derive a count from, so we fall
 * back to the host tick counter.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
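/*
 * With the fixed 1 GHz ARM_CPU_FREQ above, one emulated cycle lasts exactly
 * one nanosecond (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND == 1), so the
 * conversion below is effectively the identity; the integer division would
 * truncate to zero if ARM_CPU_FREQ were ever made configurable and set
 * below 1 GHz.
 */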
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

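        /*
         * Event numbers 0x00-0x1f map to bits of PMCEID0 and 0x20-0x3f to
         * bits of PMCEID1: e.g. STALL_FRONTEND (0x023) sets bit 3 of
         * PMCEID1, while CPU_CYCLES (0x011) sets bit 17 of PMCEID0.
         */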
        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

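/*
 * PMUSERENR bit assignments used by the access functions below: EN (bit 0)
 * opens all PMU registers to EL0, while ER (bit 3), CR (bit 2) and SW
 * (bit 1) selectively permit event counter reads, cycle counter reads and
 * software increment writes respectively.
 */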
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

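    /*
     * Decode the filter bits: P and U exclude EL1 and EL0 respectively;
     * NSK and NSU invert that choice for Non-secure EL1/EL0; NSH enables
     * counting at EL2; and M (compared against P below) controls counting
     * at Secure EL3.
     */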
    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
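/*
 * The counter state is kept as a delta: while the counter runs,
 * c15_ccnt_delta holds (underlying count - guest-visible count). The guest
 * value is recovered here as eff_cycles - delta, and the delta is
 * recomputed in pmccntr_op_finish() once the guest value may have changed.
 */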
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
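        /*
         * -ccnt is the two's-complement count remaining before the 64-bit
         * counter wraps; it is truncated to 32 bits below when PMCR.LC has
         * not selected the long cycle counter.
         */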
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

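/*
 * pmu_op_start()/pmu_op_finish() bracket any operation that may change PMU
 * state: start materializes the guest-visible counts, finish re-derives the
 * deltas and reschedules the overflow timer.
 */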
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
     * PMSELR.SEL value is then checked when PMXEVTYPER and PMXEVCNTR
     * are actually accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
1567 
1568 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1569                         uint64_t value)
1570 {
1571     pmccntr_op_start(env);
1572     env->cp15.c15_ccnt = value;
1573     pmccntr_op_finish(env);
1574 }
1575 
1576 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1577                             uint64_t value)
1578 {
1579     uint64_t cur_val = pmccntr_read(env, NULL);
1580 
1581     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1582 }
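
/*
 * Illustrative deposit64() arithmetic (operand values are assumptions):
 * the call above preserves the top half of the 64-bit cycle counter and
 * replaces only the low 32 bits, e.g.
 *
 *     deposit64(0x0123456789abcdefULL, 0, 32, 0x11223344)
 *         == 0x0123456711223344ULL
 *
 * which is what makes the AArch32 PMCCNTR a 32-bit window onto
 * PMCCNTR_EL0.
 */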
1583 
1584 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1585                             uint64_t value)
1586 {
1587     pmccntr_op_start(env);
1588     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1589     pmccntr_op_finish(env);
1590 }
1591 
1592 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1593                             uint64_t value)
1594 {
1595     pmccntr_op_start(env);
1596     /* M is not accessible from AArch32 */
1597     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1598         (value & PMCCFILTR);
1599     pmccntr_op_finish(env);
1600 }
1601 
1602 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1603 {
1604     /* M is not visible in AArch32 */
1605     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1606 }
1607 
1608 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1609                             uint64_t value)
1610 {
1611     pmu_op_start(env);
1612     value &= pmu_counter_mask(env);
1613     env->cp15.c9_pmcnten |= value;
1614     pmu_op_finish(env);
1615 }
1616 
1617 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1618                              uint64_t value)
1619 {
1620     pmu_op_start(env);
1621     value &= pmu_counter_mask(env);
1622     env->cp15.c9_pmcnten &= ~value;
1623     pmu_op_finish(env);
1624 }
1625 
1626 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                          uint64_t value)
1628 {
1629     value &= pmu_counter_mask(env);
1630     env->cp15.c9_pmovsr &= ~value;
1631     pmu_update_irq(env);
1632 }
1633 
1634 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1635                          uint64_t value)
1636 {
1637     value &= pmu_counter_mask(env);
1638     env->cp15.c9_pmovsr |= value;
1639     pmu_update_irq(env);
1640 }
1641 
1642 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643                              uint64_t value, const uint8_t counter)
1644 {
1645     if (counter == 31) {
1646         pmccfiltr_write(env, ri, value);
1647     } else if (counter < pmu_num_counters(env)) {
1648         pmevcntr_op_start(env, counter);
1649 
1650         /*
1651          * If this counter's event type is changing, store the current
1652          * underlying count for the new type in c14_pmevcntr_delta[counter] so
1653          * pmevcntr_op_finish has the correct baseline when it converts back to
1654          * a delta.
1655          */
1656         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1657             PMXEVTYPER_EVTCOUNT;
1658         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1659         if (old_event != new_event) {
1660             uint64_t count = 0;
1661             if (event_supported(new_event)) {
1662                 uint16_t event_idx = supported_event_map[new_event];
1663                 count = pm_events[event_idx].get_count(env);
1664             }
1665             env->cp15.c14_pmevcntr_delta[counter] = count;
1666         }
1667 
1668         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1669         pmevcntr_op_finish(env, counter);
1670     }
1671     /*
1672      * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1673      * PMSELR value is equal to or greater than the number of implemented
1674      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1675      */
1676 }
1677 
1678 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1679                                const uint8_t counter)
1680 {
1681     if (counter == 31) {
1682         return env->cp15.pmccfiltr_el0;
1683     } else if (counter < pmu_num_counters(env)) {
1684         return env->cp15.c14_pmevtyper[counter];
1685     } else {
1686         /*
1687          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1688          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1689          */
1690         return 0;
1691     }
1692 }
1693 
1694 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1695                               uint64_t value)
1696 {
1697     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1698     pmevtyper_write(env, ri, value, counter);
1699 }
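
/*
 * The index decode above mirrors the architectural encoding of
 * PMEVTYPER<n>_EL0: CRm is 0b11:n[4:3] and opc2 is n[2:0].
 * Illustrative example: PMEVTYPER13_EL0 has CRm = 0b1101, opc2 = 0b101,
 * so ((crm & 3) << 3) | (opc2 & 7) == (1 << 3) | 5 == 13.
 */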
1700 
1701 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1702                                uint64_t value)
1703 {
1704     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1705     env->cp15.c14_pmevtyper[counter] = value;
1706 
1707     /*
1708      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1709      * pmu_op_finish calls when loading saved state for a migration. Because
1710      * we're potentially updating the type of event here, the value written to
1711      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1712      * different counter type. Therefore, we need to set this value to the
1713      * current count for the counter type we're writing so that pmu_op_finish
1714      * has the correct count for its calculation.
1715      */
1716     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1717     if (event_supported(event)) {
1718         uint16_t event_idx = supported_event_map[event];
1719         env->cp15.c14_pmevcntr_delta[counter] =
1720             pm_events[event_idx].get_count(env);
1721     }
1722 }
1723 
1724 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1725 {
1726     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1727     return pmevtyper_read(env, ri, counter);
1728 }
1729 
1730 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1731                              uint64_t value)
1732 {
1733     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1734 }
1735 
1736 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1737 {
1738     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1739 }
1740 
1741 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1742                              uint64_t value, uint8_t counter)
1743 {
1744     if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1745         /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1746         value &= MAKE_64BIT_MASK(0, 32);
1747     }
1748     if (counter < pmu_num_counters(env)) {
1749         pmevcntr_op_start(env, counter);
1750         env->cp15.c14_pmevcntr[counter] = value;
1751         pmevcntr_op_finish(env, counter);
1752     }
1753     /*
1754      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1755      * are CONSTRAINED UNPREDICTABLE.
1756      */
1757 }
1758 
1759 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1760                               uint8_t counter)
1761 {
1762     if (counter < pmu_num_counters(env)) {
1763         uint64_t ret;
1764         pmevcntr_op_start(env, counter);
1765         ret = env->cp15.c14_pmevcntr[counter];
1766         pmevcntr_op_finish(env, counter);
1767         if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1768             /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1769             ret &= MAKE_64BIT_MASK(0, 32);
1770         }
1771         return ret;
1772     } else {
1773         /*
1774          * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1775          * are CONSTRAINED UNPREDICTABLE.
1776          */
1777         return 0;
1778     }
1779 }
1780 
1781 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1782                              uint64_t value)
1783 {
1784     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1785     pmevcntr_write(env, ri, value, counter);
1786 }
1787 
1788 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1789 {
1790     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1791     return pmevcntr_read(env, ri, counter);
1792 }
1793 
1794 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1795                              uint64_t value)
1796 {
1797     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1798     assert(counter < pmu_num_counters(env));
1799     env->cp15.c14_pmevcntr[counter] = value;
1800     /* Raw write: must not run the pmevcntr_op_start/finish hooks again. */
1801 }
1802 
1803 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1804 {
1805     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1806     assert(counter < pmu_num_counters(env));
1807     return env->cp15.c14_pmevcntr[counter];
1808 }
1809 
1810 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1811                              uint64_t value)
1812 {
1813     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1814 }
1815 
1816 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1817 {
1818     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1819 }
1820 
1821 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1822                             uint64_t value)
1823 {
1824     if (arm_feature(env, ARM_FEATURE_V8)) {
1825         env->cp15.c9_pmuserenr = value & 0xf;
1826     } else {
1827         env->cp15.c9_pmuserenr = value & 1;
1828     }
1829 }
1830 
1831 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1832                              uint64_t value)
1833 {
1834     /* Only bits for counters which are actually implemented can be set */
1835     value &= pmu_counter_mask(env);
1836     env->cp15.c9_pminten |= value;
1837     pmu_update_irq(env);
1838 }
1839 
1840 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1841                              uint64_t value)
1842 {
1843     value &= pmu_counter_mask(env);
1844     env->cp15.c9_pminten &= ~value;
1845     pmu_update_irq(env);
1846 }
1847 
1848 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1849                        uint64_t value)
1850 {
1851     /*
1852      * Note that even though the AArch64 view of this register has bits
1853      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1854      * architectural requirements for bits which are RES0 only in some
1855      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1856      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1857      */
1858     raw_write(env, ri, value & ~0x1FULL);
1859 }
1860 
1861 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1862 {
1863     /* Begin with base v8.0 state.  */
1864     uint64_t valid_mask = 0x3fff;
1865     ARMCPU *cpu = env_archcpu(env);
1866     uint64_t changed;
1867 
1868     /*
1869      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1870      * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1871      * Instead, choose the format based on the mode of EL3.
1872      */
1873     if (arm_el_is_aa64(env, 3)) {
1874         value |= SCR_FW | SCR_AW;      /* RES1 */
1875         valid_mask &= ~SCR_NET;        /* RES0 */
1876 
1877         if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1878             !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1879             value |= SCR_RW;           /* RAO/WI */
1880         }
1881         if (cpu_isar_feature(aa64_ras, cpu)) {
1882             valid_mask |= SCR_TERR;
1883         }
1884         if (cpu_isar_feature(aa64_lor, cpu)) {
1885             valid_mask |= SCR_TLOR;
1886         }
1887         if (cpu_isar_feature(aa64_pauth, cpu)) {
1888             valid_mask |= SCR_API | SCR_APK;
1889         }
1890         if (cpu_isar_feature(aa64_sel2, cpu)) {
1891             valid_mask |= SCR_EEL2;
1892         } else if (cpu_isar_feature(aa64_rme, cpu)) {
1893             /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
1894             value |= SCR_NS;
1895         }
1896         if (cpu_isar_feature(aa64_mte, cpu)) {
1897             valid_mask |= SCR_ATA;
1898         }
1899         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1900             valid_mask |= SCR_ENSCXT;
1901         }
1902         if (cpu_isar_feature(aa64_doublefault, cpu)) {
1903             valid_mask |= SCR_EASE | SCR_NMEA;
1904         }
1905         if (cpu_isar_feature(aa64_sme, cpu)) {
1906             valid_mask |= SCR_ENTP2;
1907         }
1908         if (cpu_isar_feature(aa64_hcx, cpu)) {
1909             valid_mask |= SCR_HXEN;
1910         }
1911         if (cpu_isar_feature(aa64_fgt, cpu)) {
1912             valid_mask |= SCR_FGTEN;
1913         }
1914         if (cpu_isar_feature(aa64_rme, cpu)) {
1915             valid_mask |= SCR_NSE | SCR_GPF;
1916         }
1917     } else {
1918         valid_mask &= ~(SCR_RW | SCR_ST);
1919         if (cpu_isar_feature(aa32_ras, cpu)) {
1920             valid_mask |= SCR_TERR;
1921         }
1922     }
1923 
1924     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1925         valid_mask &= ~SCR_HCE;
1926 
1927         /*
1928          * On ARMv7, SMD (or SCD as it is called in v7) is only
1929          * supported if EL2 exists. The bit is UNK/SBZP when
1930          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1931          * when EL2 is unavailable.
1932          * On ARMv8, this bit is always available.
1933          */
1934         if (arm_feature(env, ARM_FEATURE_V7) &&
1935             !arm_feature(env, ARM_FEATURE_V8)) {
1936             valid_mask &= ~SCR_SMD;
1937         }
1938     }
1939 
1940     /* Clear all-context RES0 bits.  */
1941     value &= valid_mask;
1942     changed = env->cp15.scr_el3 ^ value;
1943     env->cp15.scr_el3 = value;
1944 
1945     /*
1946      * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
1947      * we must invalidate all TLBs below EL3.
1948      */
1949     if (changed & (SCR_NS | SCR_NSE)) {
1950         tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
1951                                            ARMMMUIdxBit_E20_0 |
1952                                            ARMMMUIdxBit_E10_1 |
1953                                            ARMMMUIdxBit_E20_2 |
1954                                            ARMMMUIdxBit_E10_1_PAN |
1955                                            ARMMMUIdxBit_E20_2_PAN |
1956                                            ARMMMUIdxBit_E2));
1957     }
1958 }
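
/*
 * Illustrative consequence of the valid_mask handling above (the
 * feature mix is an assumption): on a CPU without FEAT_MTE, SCR_ATA is
 * never added to valid_mask, so a write of SCR_EL3 with the ATA bit set
 * stores it as zero and a readback observes the bit as RES0, as if the
 * hardware had ignored it.
 */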
1959 
1960 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1961 {
1962     /*
1963      * scr_write will set the RES1 bits on an AArch64-only CPU.
1964      * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1965      */
1966     scr_write(env, ri, 0);
1967 }
1968 
1969 static CPAccessResult access_tid4(CPUARMState *env,
1970                                   const ARMCPRegInfo *ri,
1971                                   bool isread)
1972 {
1973     if (arm_current_el(env) == 1 &&
1974         (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
1975         return CP_ACCESS_TRAP_EL2;
1976     }
1977 
1978     return CP_ACCESS_OK;
1979 }
1980 
1981 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1982 {
1983     ARMCPU *cpu = env_archcpu(env);
1984 
1985     /*
1986      * Acquire the CSSELR index from the bank corresponding to the CCSIDR
1987      * access.
1988      */
1989     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1990                                         ri->secure & ARM_CP_SECSTATE_S);
1991 
1992     return cpu->ccsidr[index];
1993 }
1994 
1995 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1996                          uint64_t value)
1997 {
1998     raw_write(env, ri, value & 0xf);
1999 }
2000 
2001 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2002 {
2003     CPUState *cs = env_cpu(env);
2004     bool el1 = arm_current_el(env) == 1;
2005     uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
2006     uint64_t ret = 0;
2007 
2008     if (hcr_el2 & HCR_IMO) {
2009         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2010             ret |= CPSR_I;
2011         }
2012     } else {
2013         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2014             ret |= CPSR_I;
2015         }
2016     }
2017 
2018     if (hcr_el2 & HCR_FMO) {
2019         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2020             ret |= CPSR_F;
2021         }
2022     } else {
2023         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2024             ret |= CPSR_F;
2025         }
2026     }
2027 
2028     if (hcr_el2 & HCR_AMO) {
2029         if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
2030             ret |= CPSR_A;
2031         }
2032     }
2033 
2034     return ret;
2035 }
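
/*
 * Illustrative reading of the routing logic above (the scenario is an
 * assumption): with HCR_EL2.IMO set, an EL1 read of ISR_EL1 reports
 * ISR.I from the virtual IRQ line (CPU_INTERRUPT_VIRQ) rather than the
 * physical one, matching where the interrupt would actually be taken;
 * with IMO clear, the physical CPU_INTERRUPT_HARD line is reported.
 */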
2036 
2037 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2038                                        bool isread)
2039 {
2040     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2041         return CP_ACCESS_TRAP_EL2;
2042     }
2043 
2044     return CP_ACCESS_OK;
2045 }
2046 
2047 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2048                                        bool isread)
2049 {
2050     if (arm_feature(env, ARM_FEATURE_V8)) {
2051         return access_aa64_tid1(env, ri, isread);
2052     }
2053 
2054     return CP_ACCESS_OK;
2055 }
2056 
2057 static const ARMCPRegInfo v7_cp_reginfo[] = {
2058     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2059     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2060       .access = PL1_W, .type = ARM_CP_NOP },
2061     /*
2062      * Performance monitors are implementation defined in v7,
2063      * but with an ARM recommended set of registers, which we
2064      * follow.
2065      *
2066      * Performance registers fall into three categories:
2067      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2068      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2069      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2070      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2071      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2072      */
2073     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2074       .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
2075       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2076       .writefn = pmcntenset_write,
2077       .accessfn = pmreg_access,
2078       .fgt = FGT_PMCNTEN,
2079       .raw_writefn = raw_write },
2080     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
2081       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2082       .access = PL0_RW, .accessfn = pmreg_access,
2083       .fgt = FGT_PMCNTEN,
2084       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2085       .writefn = pmcntenset_write, .raw_writefn = raw_write },
2086     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2087       .access = PL0_RW,
2088       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2089       .accessfn = pmreg_access,
2090       .fgt = FGT_PMCNTEN,
2091       .writefn = pmcntenclr_write,
2092       .type = ARM_CP_ALIAS | ARM_CP_IO },
2093     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2094       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2095       .access = PL0_RW, .accessfn = pmreg_access,
2096       .fgt = FGT_PMCNTEN,
2097       .type = ARM_CP_ALIAS | ARM_CP_IO,
2098       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2099       .writefn = pmcntenclr_write },
2100     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2101       .access = PL0_RW, .type = ARM_CP_IO,
2102       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2103       .accessfn = pmreg_access,
2104       .fgt = FGT_PMOVS,
2105       .writefn = pmovsr_write,
2106       .raw_writefn = raw_write },
2107     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2108       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2109       .access = PL0_RW, .accessfn = pmreg_access,
2110       .fgt = FGT_PMOVS,
2111       .type = ARM_CP_ALIAS | ARM_CP_IO,
2112       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2113       .writefn = pmovsr_write,
2114       .raw_writefn = raw_write },
2115     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2116       .access = PL0_W, .accessfn = pmreg_access_swinc,
2117       .fgt = FGT_PMSWINC_EL0,
2118       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2119       .writefn = pmswinc_write },
2120     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2121       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2122       .access = PL0_W, .accessfn = pmreg_access_swinc,
2123       .fgt = FGT_PMSWINC_EL0,
2124       .type = ARM_CP_NO_RAW | ARM_CP_IO,
2125       .writefn = pmswinc_write },
2126     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2127       .access = PL0_RW, .type = ARM_CP_ALIAS,
2128       .fgt = FGT_PMSELR_EL0,
2129       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2130       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2131       .raw_writefn = raw_write},
2132     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2133       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2134       .access = PL0_RW, .accessfn = pmreg_access_selr,
2135       .fgt = FGT_PMSELR_EL0,
2136       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2137       .writefn = pmselr_write, .raw_writefn = raw_write, },
2138     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2139       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2140       .fgt = FGT_PMCCNTR_EL0,
2141       .readfn = pmccntr_read, .writefn = pmccntr_write32,
2142       .accessfn = pmreg_access_ccntr },
2143     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2144       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2145       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2146       .fgt = FGT_PMCCNTR_EL0,
2147       .type = ARM_CP_IO,
2148       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2149       .readfn = pmccntr_read, .writefn = pmccntr_write,
2150       .raw_readfn = raw_read, .raw_writefn = raw_write, },
2151     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2152       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2153       .access = PL0_RW, .accessfn = pmreg_access,
2154       .fgt = FGT_PMCCFILTR_EL0,
2155       .type = ARM_CP_ALIAS | ARM_CP_IO,
2156       .resetvalue = 0, },
2157     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2158       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2159       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2160       .access = PL0_RW, .accessfn = pmreg_access,
2161       .fgt = FGT_PMCCFILTR_EL0,
2162       .type = ARM_CP_IO,
2163       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2164       .resetvalue = 0, },
2165     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2166       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2167       .accessfn = pmreg_access,
2168       .fgt = FGT_PMEVTYPERN_EL0,
2169       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2170     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2171       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2172       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2173       .accessfn = pmreg_access,
2174       .fgt = FGT_PMEVTYPERN_EL0,
2175       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2176     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2177       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2178       .accessfn = pmreg_access_xevcntr,
2179       .fgt = FGT_PMEVCNTRN_EL0,
2180       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2181     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2182       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2183       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2184       .accessfn = pmreg_access_xevcntr,
2185       .fgt = FGT_PMEVCNTRN_EL0,
2186       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2187     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2188       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2189       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2190       .resetvalue = 0,
2191       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2192     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2193       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2194       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2195       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2196       .resetvalue = 0,
2197       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2198     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2199       .access = PL1_RW, .accessfn = access_tpm,
2200       .fgt = FGT_PMINTEN,
2201       .type = ARM_CP_ALIAS | ARM_CP_IO,
2202       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2203       .resetvalue = 0,
2204       .writefn = pmintenset_write, .raw_writefn = raw_write },
2205     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2206       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2207       .access = PL1_RW, .accessfn = access_tpm,
2208       .fgt = FGT_PMINTEN,
2209       .type = ARM_CP_IO,
2210       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2211       .writefn = pmintenset_write, .raw_writefn = raw_write,
2212       .resetvalue = 0x0 },
2213     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2214       .access = PL1_RW, .accessfn = access_tpm,
2215       .fgt = FGT_PMINTEN,
2216       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2217       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2218       .writefn = pmintenclr_write, },
2219     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2220       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2221       .access = PL1_RW, .accessfn = access_tpm,
2222       .fgt = FGT_PMINTEN,
2223       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2224       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2225       .writefn = pmintenclr_write },
2226     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2227       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2228       .access = PL1_R,
2229       .accessfn = access_tid4,
2230       .fgt = FGT_CCSIDR_EL1,
2231       .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2232     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2233       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2234       .access = PL1_RW,
2235       .accessfn = access_tid4,
2236       .fgt = FGT_CSSELR_EL1,
2237       .writefn = csselr_write, .resetvalue = 0,
2238       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2239                              offsetof(CPUARMState, cp15.csselr_ns) } },
2240     /*
2241      * Auxiliary ID register: this actually has an IMPDEF value but for now
2242      * just RAZ for all cores:
2243      */
2244     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2245       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2246       .access = PL1_R, .type = ARM_CP_CONST,
2247       .accessfn = access_aa64_tid1,
2248       .fgt = FGT_AIDR_EL1,
2249       .resetvalue = 0 },
2250     /*
2251      * Auxiliary fault status registers: these also are IMPDEF, and we
2252      * choose to RAZ/WI for all cores.
2253      */
2254     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2255       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2256       .access = PL1_RW, .accessfn = access_tvm_trvm,
2257       .fgt = FGT_AFSR0_EL1,
2258       .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
2259       .type = ARM_CP_CONST, .resetvalue = 0 },
2260     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2261       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2262       .access = PL1_RW, .accessfn = access_tvm_trvm,
2263       .fgt = FGT_AFSR1_EL1,
2264       .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
2265       .type = ARM_CP_CONST, .resetvalue = 0 },
2266     /*
2267      * MAIR can just read-as-written because we don't implement caches
2268      * and so don't need to care about memory attributes.
2269      */
2270     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2271       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2272       .access = PL1_RW, .accessfn = access_tvm_trvm,
2273       .fgt = FGT_MAIR_EL1,
2274       .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
2275       .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2276       .resetvalue = 0 },
2277     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2278       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2279       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2280       .resetvalue = 0 },
2281     /*
2282      * For non-long-descriptor page tables these are PRRR and NMRR;
2283      * regardless they still act as reads-as-written for QEMU.
2284      */
2285     /*
2286      * MAIR0/1 are defined separately from their 64-bit counterpart which
2287      * allows them to assign the correct fieldoffset based on the endianness
2288      * handled in the field definitions.
2289      */
2290     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2291       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2292       .access = PL1_RW, .accessfn = access_tvm_trvm,
2293       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2294                              offsetof(CPUARMState, cp15.mair0_ns) },
2295       .resetfn = arm_cp_reset_ignore },
2296     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2297       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2298       .access = PL1_RW, .accessfn = access_tvm_trvm,
2299       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2300                              offsetof(CPUARMState, cp15.mair1_ns) },
2301       .resetfn = arm_cp_reset_ignore },
2302     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2303       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2304       .fgt = FGT_ISR_EL1,
2305       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2306     /* 32 bit ITLB invalidates */
2307     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2308       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2309       .writefn = tlbiall_write },
2310     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2311       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2312       .writefn = tlbimva_write },
2313     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2314       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2315       .writefn = tlbiasid_write },
2316     /* 32 bit DTLB invalidates */
2317     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2318       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2319       .writefn = tlbiall_write },
2320     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2321       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2322       .writefn = tlbimva_write },
2323     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2324       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2325       .writefn = tlbiasid_write },
2326     /* 32 bit TLB invalidates */
2327     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2328       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2329       .writefn = tlbiall_write },
2330     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2331       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2332       .writefn = tlbimva_write },
2333     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2334       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2335       .writefn = tlbiasid_write },
2336     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2337       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2338       .writefn = tlbimvaa_write },
2339 };
2340 
2341 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2342     /* 32 bit TLB invalidates, Inner Shareable */
2343     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2344       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2345       .writefn = tlbiall_is_write },
2346     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2347       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2348       .writefn = tlbimva_is_write },
2349     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2350       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2351       .writefn = tlbiasid_is_write },
2352     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2353       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2354       .writefn = tlbimvaa_is_write },
2355 };
2356 
2357 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2358     /* PMOVSSET is not implemented in v7 before v7ve */
2359     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2360       .access = PL0_RW, .accessfn = pmreg_access,
2361       .fgt = FGT_PMOVS,
2362       .type = ARM_CP_ALIAS | ARM_CP_IO,
2363       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2364       .writefn = pmovsset_write,
2365       .raw_writefn = raw_write },
2366     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2367       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2368       .access = PL0_RW, .accessfn = pmreg_access,
2369       .fgt = FGT_PMOVS,
2370       .type = ARM_CP_ALIAS | ARM_CP_IO,
2371       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2372       .writefn = pmovsset_write,
2373       .raw_writefn = raw_write },
2374 };
2375 
2376 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2377                         uint64_t value)
2378 {
2379     value &= 1;
2380     env->teecr = value;
2381 }
2382 
2383 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2384                                    bool isread)
2385 {
2386     /*
2387      * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2388      * at all, so we don't need to check whether we're v8A.
2389      */
2390     if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2391         (env->cp15.hstr_el2 & HSTR_TTEE)) {
2392         return CP_ACCESS_TRAP_EL2;
2393     }
2394     return CP_ACCESS_OK;
2395 }
2396 
2397 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2398                                     bool isread)
2399 {
2400     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2401         return CP_ACCESS_TRAP;
2402     }
2403     return teecr_access(env, ri, isread);
2404 }
2405 
2406 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2407     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2408       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2409       .resetvalue = 0,
2410       .writefn = teecr_write, .accessfn = teecr_access },
2411     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2412       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2413       .accessfn = teehbr_access, .resetvalue = 0 },
2414 };
2415 
2416 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2417     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2418       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2419       .access = PL0_RW,
2420       .fgt = FGT_TPIDR_EL0,
2421       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2422     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2423       .access = PL0_RW,
2424       .fgt = FGT_TPIDR_EL0,
2425       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2426                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2427       .resetfn = arm_cp_reset_ignore },
2428     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2429       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2430       .access = PL0_R | PL1_W,
2431       .fgt = FGT_TPIDRRO_EL0,
2432       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2433       .resetvalue = 0},
2434     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2435       .access = PL0_R | PL1_W,
2436       .fgt = FGT_TPIDRRO_EL0,
2437       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2438                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2439       .resetfn = arm_cp_reset_ignore },
2440     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2441       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2442       .access = PL1_RW,
2443       .fgt = FGT_TPIDR_EL1,
2444       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2445     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2446       .access = PL1_RW,
2447       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2448                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2449       .resetvalue = 0 },
2450 };
2451 
2452 #ifndef CONFIG_USER_ONLY
2453 
2454 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2455                                        bool isread)
2456 {
2457     /*
2458      * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2459      * Writable only at the highest implemented exception level.
2460      */
2461     int el = arm_current_el(env);
2462     uint64_t hcr;
2463     uint32_t cntkctl;
2464 
2465     switch (el) {
2466     case 0:
2467         hcr = arm_hcr_el2_eff(env);
2468         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2469             cntkctl = env->cp15.cnthctl_el2;
2470         } else {
2471             cntkctl = env->cp15.c14_cntkctl;
2472         }
2473         if (!extract32(cntkctl, 0, 2)) {
2474             return CP_ACCESS_TRAP;
2475         }
2476         break;
2477     case 1:
2478         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2479             arm_is_secure_below_el3(env)) {
2480             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2481             return CP_ACCESS_TRAP_UNCATEGORIZED;
2482         }
2483         break;
2484     case 2:
2485     case 3:
2486         break;
2487     }
2488 
2489     if (!isread && el < arm_highest_el(env)) {
2490         return CP_ACCESS_TRAP_UNCATEGORIZED;
2491     }
2492 
2493     return CP_ACCESS_OK;
2494 }
2495 
2496 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2497                                         bool isread)
2498 {
2499     unsigned int cur_el = arm_current_el(env);
2500     bool has_el2 = arm_is_el2_enabled(env);
2501     uint64_t hcr = arm_hcr_el2_eff(env);
2502 
2503     switch (cur_el) {
2504     case 0:
2505         /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2506         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2507             return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2508                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2509         }
2510 
2511         /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2512         if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2513             return CP_ACCESS_TRAP;
2514         }
2515         /* fall through */
2516     case 1:
2517         /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2518         if (has_el2 && timeridx == GTIMER_PHYS &&
2519             (hcr & HCR_E2H
2520              ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2521              : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2522             return CP_ACCESS_TRAP_EL2;
2523         }
2524         break;
2525     }
2526     return CP_ACCESS_OK;
2527 }
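
/*
 * Note on the extract32() uses above: in CNTKCTL_EL1 (and in
 * CNTHCTL_EL2 when HCR_EL2.<E2H,TGE> == '11'), EL0PCTEN is bit 0 and
 * EL0VCTEN is bit 1, while GTIMER_PHYS == 0 and GTIMER_VIRT == 1, so
 * extract32(..., timeridx, 1) selects exactly the enable bit for the
 * counter being accessed.
 */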
2528 
2529 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2530                                       bool isread)
2531 {
2532     unsigned int cur_el = arm_current_el(env);
2533     bool has_el2 = arm_is_el2_enabled(env);
2534     uint64_t hcr = arm_hcr_el2_eff(env);
2535 
2536     switch (cur_el) {
2537     case 0:
2538         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2539             /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2540             return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2541                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2542         }
2543 
2544         /*
2545          * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2546          * EL0 if EL0[PV]TEN is zero.
2547          */
2548         if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2549             return CP_ACCESS_TRAP;
2550         }
2551         /* fall through */
2552 
2553     case 1:
2554         if (has_el2 && timeridx == GTIMER_PHYS) {
2555             if (hcr & HCR_E2H) {
2556                 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2557                 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2558                     return CP_ACCESS_TRAP_EL2;
2559                 }
2560             } else {
2561                 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2562                 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2563                     return CP_ACCESS_TRAP_EL2;
2564                 }
2565             }
2566         }
2567         break;
2568     }
2569     return CP_ACCESS_OK;
2570 }
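
/*
 * Note on the "9 - timeridx" expression above: EL0VTEN is bit 8 and
 * EL0PTEN is bit 9 of the relevant control register. With
 * GTIMER_PHYS == 0 and GTIMER_VIRT == 1, 9 - timeridx yields bit 9 for
 * the physical timer and bit 8 for the virtual one, so a single
 * expression covers both enable bits.
 */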
2571 
2572 static CPAccessResult gt_pct_access(CPUARMState *env,
2573                                     const ARMCPRegInfo *ri,
2574                                     bool isread)
2575 {
2576     return gt_counter_access(env, GTIMER_PHYS, isread);
2577 }
2578 
2579 static CPAccessResult gt_vct_access(CPUARMState *env,
2580                                     const ARMCPRegInfo *ri,
2581                                     bool isread)
2582 {
2583     return gt_counter_access(env, GTIMER_VIRT, isread);
2584 }
2585 
2586 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2587                                        bool isread)
2588 {
2589     return gt_timer_access(env, GTIMER_PHYS, isread);
2590 }
2591 
2592 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2593                                        bool isread)
2594 {
2595     return gt_timer_access(env, GTIMER_VIRT, isread);
2596 }
2597 
2598 static CPAccessResult gt_stimer_access(CPUARMState *env,
2599                                        const ARMCPRegInfo *ri,
2600                                        bool isread)
2601 {
2602     /*
2603      * The AArch64 register view of the secure physical timer is
2604      * always accessible from EL3, and configurably accessible from
2605      * Secure EL1.
2606      */
2607     switch (arm_current_el(env)) {
2608     case 1:
2609         if (!arm_is_secure(env)) {
2610             return CP_ACCESS_TRAP;
2611         }
2612         if (!(env->cp15.scr_el3 & SCR_ST)) {
2613             return CP_ACCESS_TRAP_EL3;
2614         }
2615         return CP_ACCESS_OK;
2616     case 0:
2617     case 2:
2618         return CP_ACCESS_TRAP;
2619     case 3:
2620         return CP_ACCESS_OK;
2621     default:
2622         g_assert_not_reached();
2623     }
2624 }
2625 
2626 static uint64_t gt_get_countervalue(CPUARMState *env)
2627 {
2628     ARMCPU *cpu = env_archcpu(env);
2629 
2630     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2631 }
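
/*
 * Worked example (the counter frequency is an assumption): if
 * gt_cntfrq_period_ns(cpu) returns 16, i.e. a 62.5 MHz counter, then
 * 1 ms of QEMU_CLOCK_VIRTUAL time (1000000 ns) reads back as
 * 1000000 / 16 == 62500 ticks. Note the integer division: the counter
 * value only advances once a full period has elapsed.
 */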
2632 
2633 static void gt_update_irq(ARMCPU *cpu, int timeridx)
2634 {
2635     CPUARMState *env = &cpu->env;
2636     uint64_t cnthctl = env->cp15.cnthctl_el2;
2637     ARMSecuritySpace ss = arm_security_space(env);
2638     /* ISTATUS && !IMASK */
2639     int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
2640 
2641     /*
2642      * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
2643      * It is RES0 in Secure and NonSecure state.
2644      */
2645     if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
2646         ((timeridx == GTIMER_VIRT && (cnthctl & CNTHCTL_CNTVMASK)) ||
2647          (timeridx == GTIMER_PHYS && (cnthctl & CNTHCTL_CNTPMASK)))) {
2648         irqstate = 0;
2649     }
2650 
2651     qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2652     trace_arm_gt_update_irq(timeridx, irqstate);
2653 }
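
/*
 * Decode of the "(ctl & 6) == 4" test above: in CNT*_CTL, bit 2 is
 * ISTATUS and bit 1 is IMASK, so the only combination that asserts the
 * line is ISTATUS set with IMASK clear. Illustratively: ctl == 4
 * fires, ctl == 6 is masked, and ctl == 0 or 2 has nothing pending.
 */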
2654 
2655 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
2656 {
2657     /*
2658      * Changing security state between Root and Secure/NonSecure, which may
2659      * happen when switching EL, can change the effective value of CNTHCTL_EL2
2660      * mask bits. Update the IRQ state accordingly.
2661      */
2662     gt_update_irq(cpu, GTIMER_VIRT);
2663     gt_update_irq(cpu, GTIMER_PHYS);
2664 }
2665 
2666 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2667 {
2668     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2669 
2670     if (gt->ctl & 1) {
2671         /*
2672          * Timer enabled: calculate and set current ISTATUS, irq, and
2673          * reset timer to when ISTATUS next has to change
2674          */
2675         uint64_t offset = timeridx == GTIMER_VIRT ?
2676                                       cpu->env.cp15.cntvoff_el2 : 0;
2677         uint64_t count = gt_get_countervalue(&cpu->env);
2678         /* Note that this must be unsigned 64 bit arithmetic: */
2679         int istatus = count - offset >= gt->cval;
2680         uint64_t nexttick;
2681 
2682         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2683 
2684         if (istatus) {
2685             /*
2686              * Next transition is when (count - offset) rolls back over to 0.
2687              * If offset > count then this is when count == offset;
2688              * if offset <= count then this is when count == offset + 2^64.
2689              * For the latter case we set nexttick to an "as far in the future
2690              * as possible" value and let the code below handle it.
2691              */
2692             if (offset > count) {
2693                 nexttick = offset;
2694             } else {
2695                 nexttick = UINT64_MAX;
2696             }
2697         } else {
2698             /*
2699              * Next transition is when (count - offset) == cval, i.e.
2700              * when count == (cval + offset).
2701              * If that would overflow, then again we set up the next interrupt
2702              * for "as far in the future as possible" for the code below.
2703              */
2704             if (uadd64_overflow(gt->cval, offset, &nexttick)) {
2705                 nexttick = UINT64_MAX;
2706             }
2707         }
2708         /*
2709          * Note that the desired next expiry time might be beyond the
2710          * signed-64-bit range of a QEMUTimer -- in this case we just
2711          * set the timer for as far in the future as possible. When the
2712          * timer expires we will reset the timer for any remaining period.
2713          */
2714         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2715             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2716         } else {
2717             timer_mod(cpu->gt_timer[timeridx], nexttick);
2718         }
2719         trace_arm_gt_recalc(timeridx, nexttick);
2720     } else {
2721         /* Timer disabled: ISTATUS and timer output always clear */
2722         gt->ctl &= ~4;
2723         timer_del(cpu->gt_timer[timeridx]);
2724         trace_arm_gt_recalc_disabled(timeridx);
2725     }
2726     gt_update_irq(cpu, timeridx);
2727 }
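
/*
 * Worked example of the istatus path above (all values are
 * assumptions): with offset == 0, count == 100 and cval == 90,
 * count - offset >= cval holds and ISTATUS is set. Since
 * offset <= count, (count - offset) only wraps back past zero after
 * nearly 2^64 more ticks, so nexttick becomes UINT64_MAX; that exceeds
 * the QEMUTimer range once scaled to nanoseconds, and the timer is
 * parked at INT64_MAX until the guest reprograms CVAL or CTL.
 */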
2728 
2729 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2730                            int timeridx)
2731 {
2732     ARMCPU *cpu = env_archcpu(env);
2733 
2734     timer_del(cpu->gt_timer[timeridx]);
2735 }
2736 
2737 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2738 {
2739     return gt_get_countervalue(env);
2740 }
2741 
2742 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2743 {
2744     uint64_t hcr;
2745 
2746     switch (arm_current_el(env)) {
2747     case 2:
2748         hcr = arm_hcr_el2_eff(env);
2749         if (hcr & HCR_E2H) {
2750             return 0;
2751         }
2752         break;
2753     case 0:
2754         hcr = arm_hcr_el2_eff(env);
2755         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2756             return 0;
2757         }
2758         break;
2759     }
2760 
2761     return env->cp15.cntvoff_el2;
2762 }
2763 
2764 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2765 {
2766     return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2767 }
2768 
2769 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2770                           int timeridx,
2771                           uint64_t value)
2772 {
2773     trace_arm_gt_cval_write(timeridx, value);
2774     env->cp15.c14_timer[timeridx].cval = value;
2775     gt_recalc_timer(env_archcpu(env), timeridx);
2776 }
2777 
2778 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2779                              int timeridx)
2780 {
2781     uint64_t offset = 0;
2782 
2783     switch (timeridx) {
2784     case GTIMER_VIRT:
2785     case GTIMER_HYPVIRT:
2786         offset = gt_virt_cnt_offset(env);
2787         break;
2788     }
2789 
2790     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2791                       (gt_get_countervalue(env) - offset));
2792 }
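
/*
 * Illustrative TVAL arithmetic (values are assumptions): TVAL is the
 * signed 32-bit distance from the offset-adjusted counter to CVAL.
 * With cval == 1000 and (count - offset) == 400, TVAL reads 600; once
 * the counter passes CVAL the value goes negative, e.g. a count of
 * 1005 reads back as 0xfffffffb, i.e. -5 when interpreted as signed.
 */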
2793 
2794 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2795                           int timeridx,
2796                           uint64_t value)
2797 {
2798     uint64_t offset = 0;
2799 
2800     switch (timeridx) {
2801     case GTIMER_VIRT:
2802     case GTIMER_HYPVIRT:
2803         offset = gt_virt_cnt_offset(env);
2804         break;
2805     }
2806 
2807     trace_arm_gt_tval_write(timeridx, value);
2808     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2809                                          sextract64(value, 0, 32);
2810     gt_recalc_timer(env_archcpu(env), timeridx);
2811 }
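
/*
 * Illustrative TVAL write (the written value is an assumption): the
 * sextract64() above sign-extends 32-bit TVAL, so writing 0xffffffff
 * (i.e. -1) sets CVAL one tick below the current adjusted count.
 * ISTATUS then becomes true immediately and, if the timer is enabled
 * and unmasked, gt_recalc_timer() raises the interrupt right away.
 */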
2812 
2813 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2814                          int timeridx,
2815                          uint64_t value)
2816 {
2817     ARMCPU *cpu = env_archcpu(env);
2818     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2819 
2820     trace_arm_gt_ctl_write(timeridx, value);
2821     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2822     if ((oldval ^ value) & 1) {
2823         /* Enable toggled */
2824         gt_recalc_timer(cpu, timeridx);
2825     } else if ((oldval ^ value) & 2) {
2826         /*
2827          * IMASK toggled: don't need to recalculate,
2828          * just set the interrupt line based on ISTATUS
2829          */
2830         trace_arm_gt_imask_toggle(timeridx);
2831         gt_update_irq(cpu, timeridx);
2832     }
2833 }
2834 
2835 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2836 {
2837     gt_timer_reset(env, ri, GTIMER_PHYS);
2838 }
2839 
2840 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2841                                uint64_t value)
2842 {
2843     gt_cval_write(env, ri, GTIMER_PHYS, value);
2844 }
2845 
2846 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2847 {
2848     return gt_tval_read(env, ri, GTIMER_PHYS);
2849 }
2850 
2851 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2852                                uint64_t value)
2853 {
2854     gt_tval_write(env, ri, GTIMER_PHYS, value);
2855 }
2856 
2857 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2858                               uint64_t value)
2859 {
2860     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2861 }
2862 
2863 static int gt_phys_redir_timeridx(CPUARMState *env)
2864 {
2865     switch (arm_mmu_idx(env)) {
2866     case ARMMMUIdx_E20_0:
2867     case ARMMMUIdx_E20_2:
2868     case ARMMMUIdx_E20_2_PAN:
2869         return GTIMER_HYP;
2870     default:
2871         return GTIMER_PHYS;
2872     }
2873 }
2874 
2875 static int gt_virt_redir_timeridx(CPUARMState *env)
2876 {
2877     switch (arm_mmu_idx(env)) {
2878     case ARMMMUIdx_E20_0:
2879     case ARMMMUIdx_E20_2:
2880     case ARMMMUIdx_E20_2_PAN:
2881         return GTIMER_HYPVIRT;
2882     default:
2883         return GTIMER_VIRT;
2884     }
2885 }
2886 
2887 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2888                                         const ARMCPRegInfo *ri)
2889 {
2890     int timeridx = gt_phys_redir_timeridx(env);
2891     return env->cp15.c14_timer[timeridx].cval;
2892 }
2893 
2894 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2895                                      uint64_t value)
2896 {
2897     int timeridx = gt_phys_redir_timeridx(env);
2898     gt_cval_write(env, ri, timeridx, value);
2899 }
2900 
2901 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2902                                         const ARMCPRegInfo *ri)
2903 {
2904     int timeridx = gt_phys_redir_timeridx(env);
2905     return gt_tval_read(env, ri, timeridx);
2906 }
2907 
2908 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                                      uint64_t value)
2910 {
2911     int timeridx = gt_phys_redir_timeridx(env);
2912     gt_tval_write(env, ri, timeridx, value);
2913 }
2914 
2915 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2916                                        const ARMCPRegInfo *ri)
2917 {
2918     int timeridx = gt_phys_redir_timeridx(env);
2919     return env->cp15.c14_timer[timeridx].ctl;
2920 }
2921 
2922 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2923                                     uint64_t value)
2924 {
2925     int timeridx = gt_phys_redir_timeridx(env);
2926     gt_ctl_write(env, ri, timeridx, value);
2927 }
2928 
2929 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2930 {
2931     gt_timer_reset(env, ri, GTIMER_VIRT);
2932 }
2933 
2934 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2935                                uint64_t value)
2936 {
2937     gt_cval_write(env, ri, GTIMER_VIRT, value);
2938 }
2939 
2940 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2941 {
2942     return gt_tval_read(env, ri, GTIMER_VIRT);
2943 }
2944 
2945 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2946                                uint64_t value)
2947 {
2948     gt_tval_write(env, ri, GTIMER_VIRT, value);
2949 }
2950 
2951 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2952                               uint64_t value)
2953 {
2954     gt_ctl_write(env, ri, GTIMER_VIRT, value);
2955 }
2956 
2957 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2958                              uint64_t value)
2959 {
2960     ARMCPU *cpu = env_archcpu(env);
2961     uint32_t oldval = env->cp15.cnthctl_el2;
2962 
2963     raw_write(env, ri, value);
2964 
2965     /* A single write may toggle both mask bits; check each independently. */
2966     if ((oldval ^ value) & CNTHCTL_CNTVMASK) {
2967         gt_update_irq(cpu, GTIMER_VIRT);
2968     }
2969     if ((oldval ^ value) & CNTHCTL_CNTPMASK) {
2970         gt_update_irq(cpu, GTIMER_PHYS);
2971     }
2970 }
2971 
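/*
 * CNTVOFF_EL2 shifts the virtual counter, so the virtual timer must
 * be recalculated after a write.
 */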
2972 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2973                               uint64_t value)
2974 {
2975     ARMCPU *cpu = env_archcpu(env);
2976 
2977     trace_arm_gt_cntvoff_write(value);
2978     raw_write(env, ri, value);
2979     gt_recalc_timer(cpu, GTIMER_VIRT);
2980 }
2981 
2982 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2983                                         const ARMCPRegInfo *ri)
2984 {
2985     int timeridx = gt_virt_redir_timeridx(env);
2986     return env->cp15.c14_timer[timeridx].cval;
2987 }
2988 
2989 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2990                                      uint64_t value)
2991 {
2992     int timeridx = gt_virt_redir_timeridx(env);
2993     gt_cval_write(env, ri, timeridx, value);
2994 }
2995 
2996 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2997                                         const ARMCPRegInfo *ri)
2998 {
2999     int timeridx = gt_virt_redir_timeridx(env);
3000     return gt_tval_read(env, ri, timeridx);
3001 }
3002 
3003 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3004                                      uint64_t value)
3005 {
3006     int timeridx = gt_virt_redir_timeridx(env);
3007     gt_tval_write(env, ri, timeridx, value);
3008 }
3009 
3010 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
3011                                        const ARMCPRegInfo *ri)
3012 {
3013     int timeridx = gt_virt_redir_timeridx(env);
3014     return env->cp15.c14_timer[timeridx].ctl;
3015 }
3016 
3017 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3018                                     uint64_t value)
3019 {
3020     int timeridx = gt_virt_redir_timeridx(env);
3021     gt_ctl_write(env, ri, timeridx, value);
3022 }
3023 
3024 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3025 {
3026     gt_timer_reset(env, ri, GTIMER_HYP);
3027 }
3028 
3029 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3030                               uint64_t value)
3031 {
3032     gt_cval_write(env, ri, GTIMER_HYP, value);
3033 }
3034 
3035 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3036 {
3037     return gt_tval_read(env, ri, GTIMER_HYP);
3038 }
3039 
3040 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3041                               uint64_t value)
3042 {
3043     gt_tval_write(env, ri, GTIMER_HYP, value);
3044 }
3045 
3046 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3047                               uint64_t value)
3048 {
3049     gt_ctl_write(env, ri, GTIMER_HYP, value);
3050 }
3051 
3052 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3053 {
3054     gt_timer_reset(env, ri, GTIMER_SEC);
3055 }
3056 
3057 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3058                               uint64_t value)
3059 {
3060     gt_cval_write(env, ri, GTIMER_SEC, value);
3061 }
3062 
3063 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3064 {
3065     return gt_tval_read(env, ri, GTIMER_SEC);
3066 }
3067 
3068 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3069                               uint64_t value)
3070 {
3071     gt_tval_write(env, ri, GTIMER_SEC, value);
3072 }
3073 
3074 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3075                               uint64_t value)
3076 {
3077     gt_ctl_write(env, ri, GTIMER_SEC, value);
3078 }
3079 
3080 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3081 {
3082     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3083 }
3084 
3085 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3086                              uint64_t value)
3087 {
3088     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3089 }
3090 
3091 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3092 {
3093     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3094 }
3095 
3096 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3097                              uint64_t value)
3098 {
3099     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3100 }
3101 
3102 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3103                             uint64_t value)
3104 {
3105     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3106 }
3107 
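/*
 * QEMUTimer callbacks for the architected timers: one per timer, each
 * fires when the backing QEMUTimer expires and recalculates that
 * timer's state (and hence its interrupt line).
 */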
3108 void arm_gt_ptimer_cb(void *opaque)
3109 {
3110     ARMCPU *cpu = opaque;
3111 
3112     gt_recalc_timer(cpu, GTIMER_PHYS);
3113 }
3114 
3115 void arm_gt_vtimer_cb(void *opaque)
3116 {
3117     ARMCPU *cpu = opaque;
3118 
3119     gt_recalc_timer(cpu, GTIMER_VIRT);
3120 }
3121 
3122 void arm_gt_htimer_cb(void *opaque)
3123 {
3124     ARMCPU *cpu = opaque;
3125 
3126     gt_recalc_timer(cpu, GTIMER_HYP);
3127 }
3128 
3129 void arm_gt_stimer_cb(void *opaque)
3130 {
3131     ARMCPU *cpu = opaque;
3132 
3133     gt_recalc_timer(cpu, GTIMER_SEC);
3134 }
3135 
3136 void arm_gt_hvtimer_cb(void *opaque)
3137 {
3138     ARMCPU *cpu = opaque;
3139 
3140     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3141 }
3142 
3143 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3144 {
3145     ARMCPU *cpu = env_archcpu(env);
3146 
3147     cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3148 }
3149 
3150 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3151     /*
3152      * Note that CNTFRQ is purely reads-as-written for the benefit
3153      * of software; writing it doesn't actually change the timer frequency.
3154      * Our reset value matches the fixed frequency we implement the timer at.
3155      */
3156     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3157       .type = ARM_CP_ALIAS,
3158       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3159       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3160     },
3161     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3162       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3163       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3164       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3165       .resetfn = arm_gt_cntfrq_reset,
3166     },
3167     /* overall control: mostly access permissions */
3168     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3169       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3170       .access = PL1_RW,
3171       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3172       .resetvalue = 0,
3173     },
3174     /* per-timer control */
3175     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3176       .secure = ARM_CP_SECSTATE_NS,
3177       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3178       .accessfn = gt_ptimer_access,
3179       .fieldoffset = offsetoflow32(CPUARMState,
3180                                    cp15.c14_timer[GTIMER_PHYS].ctl),
3181       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3182       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3183     },
3184     { .name = "CNTP_CTL_S",
3185       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3186       .secure = ARM_CP_SECSTATE_S,
3187       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3188       .accessfn = gt_ptimer_access,
3189       .fieldoffset = offsetoflow32(CPUARMState,
3190                                    cp15.c14_timer[GTIMER_SEC].ctl),
3191       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3192     },
3193     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3194       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3195       .type = ARM_CP_IO, .access = PL0_RW,
3196       .accessfn = gt_ptimer_access,
3197       .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
3198       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3199       .resetvalue = 0,
3200       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3201       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3202     },
3203     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3204       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3205       .accessfn = gt_vtimer_access,
3206       .fieldoffset = offsetoflow32(CPUARMState,
3207                                    cp15.c14_timer[GTIMER_VIRT].ctl),
3208       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3209       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3210     },
3211     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3212       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3213       .type = ARM_CP_IO, .access = PL0_RW,
3214       .accessfn = gt_vtimer_access,
3215       .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
3216       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3217       .resetvalue = 0,
3218       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3219       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3220     },
    /* TimerValue views: a 32-bit downcounting view of the underlying state */
3222     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3223       .secure = ARM_CP_SECSTATE_NS,
3224       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3225       .accessfn = gt_ptimer_access,
3226       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3227     },
3228     { .name = "CNTP_TVAL_S",
3229       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3230       .secure = ARM_CP_SECSTATE_S,
3231       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3232       .accessfn = gt_ptimer_access,
3233       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3234     },
3235     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3236       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3237       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3238       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3239       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3240     },
3241     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3242       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3243       .accessfn = gt_vtimer_access,
3244       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3245     },
3246     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3247       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3248       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3249       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3250       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3251     },
3252     /* The counter itself */
3253     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3254       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3255       .accessfn = gt_pct_access,
3256       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3257     },
3258     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3259       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3260       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3261       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3262     },
3263     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3264       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3265       .accessfn = gt_vct_access,
3266       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3267     },
3268     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3269       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3270       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3271       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3272     },
3273     /* Comparison value, indicating when the timer goes off */
3274     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3275       .secure = ARM_CP_SECSTATE_NS,
3276       .access = PL0_RW,
3277       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3278       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3279       .accessfn = gt_ptimer_access,
3280       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3281       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3282     },
3283     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3284       .secure = ARM_CP_SECSTATE_S,
3285       .access = PL0_RW,
3286       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3287       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3288       .accessfn = gt_ptimer_access,
3289       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3290     },
3291     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3292       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3293       .access = PL0_RW,
3294       .type = ARM_CP_IO,
3295       .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
3296       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3297       .resetvalue = 0, .accessfn = gt_ptimer_access,
3298       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3299       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3300     },
3301     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3302       .access = PL0_RW,
3303       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3304       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3305       .accessfn = gt_vtimer_access,
3306       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3307       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3308     },
3309     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3310       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3311       .access = PL0_RW,
3312       .type = ARM_CP_IO,
3313       .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
3314       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3315       .resetvalue = 0, .accessfn = gt_vtimer_access,
3316       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3317       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3318     },
    /*
     * Secure timer -- this is restricted to EL3 and, configurably
     * via the accessfn, to Secure EL1.
     */
3323     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3324       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3325       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3326       .accessfn = gt_stimer_access,
3327       .readfn = gt_sec_tval_read,
3328       .writefn = gt_sec_tval_write,
3329       .resetfn = gt_sec_timer_reset,
3330     },
3331     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3332       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3333       .type = ARM_CP_IO, .access = PL1_RW,
3334       .accessfn = gt_stimer_access,
3335       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3336       .resetvalue = 0,
3337       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3338     },
3339     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3340       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3341       .type = ARM_CP_IO, .access = PL1_RW,
3342       .accessfn = gt_stimer_access,
3343       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3344       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3345     },
3346 };
3347 
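/*
 * Access check used for the E2H register aliases (e.g. the CNT*_EL02
 * registers): they UNDEF unless HCR_EL2.E2H is set, with a carve-out
 * for FEAT_NV accesses from EL1 (see the comment in the body).
 */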
3348 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3349                                  bool isread)
3350 {
3351     if (arm_current_el(env) == 1) {
3352         /* This must be a FEAT_NV access */
3353         /* TODO: FEAT_ECV will need to check CNTHCTL_EL2 here */
3354         return CP_ACCESS_OK;
3355     }
3356     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3357         return CP_ACCESS_TRAP;
3358     }
3359     return CP_ACCESS_OK;
3360 }
3361 
3362 #else
3363 
/*
 * In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */
3368 
3369 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3370 {
3371     ARMCPU *cpu = env_archcpu(env);
3372 
    /*
     * Currently we have no support for QEMUTimer in linux-user, so we
     * can't call gt_get_countervalue(env); instead we call the
     * lower-level functions directly.
     */
3378     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3379 }
3380 
3381 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3382     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3383       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3384       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3385       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3386       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3387     },
3388     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3389       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3390       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3391       .readfn = gt_virt_cnt_read,
3392     },
3393 };
3394 
3395 #endif
3396 
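/*
 * Writes to PAR: with LPAE the full value is stored raw; otherwise we
 * mask off the bits that are RES0 in the 32-bit PAR format (the
 * precise mask differs between ARMv7 and earlier cores).
 */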
3397 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3398 {
3399     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3400         raw_write(env, ri, value);
3401     } else if (arm_feature(env, ARM_FEATURE_V7)) {
3402         raw_write(env, ri, value & 0xfffff6ff);
3403     } else {
3404         raw_write(env, ri, value & 0xfffff1ff);
3405     }
3406 }
3407 
3408 #ifndef CONFIG_USER_ONLY
3409 /* get_phys_addr() isn't present for user-mode-only targets */
3410 
3411 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3412                                  bool isread)
3413 {
3414     if (ri->opc2 & 4) {
3415         /*
3416          * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3417          * Secure EL1 (which can only happen if EL3 is AArch64).
3418          * They are simply UNDEF if executed from NS EL1.
3419          * They function normally from EL2 or EL3.
3420          */
3421         if (arm_current_el(env) == 1) {
3422             if (arm_is_secure_below_el3(env)) {
3423                 if (env->cp15.scr_el3 & SCR_EEL2) {
3424                     return CP_ACCESS_TRAP_EL2;
3425                 }
3426                 return CP_ACCESS_TRAP_EL3;
3427             }
3428             return CP_ACCESS_TRAP_UNCATEGORIZED;
3429         }
3430     }
3431     return CP_ACCESS_OK;
3432 }
3433 
3434 #ifdef CONFIG_TCG
3435 static int par_el1_shareability(GetPhysAddrResult *res)
3436 {
3437     /*
3438      * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3439      * memory -- see pseudocode PAREncodeShareability().
3440      */
3441     if (((res->cacheattrs.attrs & 0xf0) == 0) ||
3442         res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
3443         return 2;
3444     }
3445     return res->cacheattrs.shareability;
3446 }
3447 
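/*
 * Do the work backing an AT (address translation) instruction:
 * translate 'value' for the given access type, translation regime and
 * security space, and return a PAR value in the 32-bit short or
 * 64-bit long format as appropriate. Most faults are reported via the
 * returned PAR; certain stage 2 walk faults instead raise an
 * exception directly.
 */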
3448 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3449                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
3450                              ARMSecuritySpace ss)
3451 {
3452     bool ret;
3453     uint64_t par64;
3454     bool format64 = false;
3455     ARMMMUFaultInfo fi = {};
3456     GetPhysAddrResult res = {};
3457 
3458     /*
3459      * I_MXTJT: Granule protection checks are not performed on the final address
3460      * of a successful translation.
3461      */
3462     ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
3463                                          &res, &fi);
3464 
3465     /*
3466      * ATS operations only do S1 or S1+S2 translations, so we never
3467      * have to deal with the ARMCacheAttrs format for S2 only.
3468      */
3469     assert(!res.cacheattrs.is_s2_format);
3470 
3471     if (ret) {
3472         /*
3473          * Some kinds of translation fault must cause exceptions rather
3474          * than being reported in the PAR.
3475          */
3476         int current_el = arm_current_el(env);
3477         int target_el;
3478         uint32_t syn, fsr, fsc;
3479         bool take_exc = false;
3480 
3481         if (fi.s1ptw && current_el == 1
3482             && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3483             /*
3484              * Synchronous stage 2 fault on an access made as part of the
3485              * translation table walk for AT S1E0* or AT S1E1* insn
3486              * executed from NS EL1. If this is a synchronous external abort
3487              * and SCR_EL3.EA == 1, then we take a synchronous external abort
3488              * to EL3. Otherwise the fault is taken as an exception to EL2,
3489              * and HPFAR_EL2 holds the faulting IPA.
3490              */
3491             if (fi.type == ARMFault_SyncExternalOnWalk &&
3492                 (env->cp15.scr_el3 & SCR_EA)) {
3493                 target_el = 3;
3494             } else {
3495                 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3496                 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3497                     env->cp15.hpfar_el2 |= HPFAR_NS;
3498                 }
3499                 target_el = 2;
3500             }
3501             take_exc = true;
3502         } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3503             /*
3504              * Synchronous external aborts during a translation table walk
3505              * are taken as Data Abort exceptions.
3506              */
3507             if (fi.stage2) {
3508                 if (current_el == 3) {
3509                     target_el = 3;
3510                 } else {
3511                     target_el = 2;
3512                 }
3513             } else {
3514                 target_el = exception_target_el(env);
3515             }
3516             take_exc = true;
3517         }
3518 
3519         if (take_exc) {
3520             /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3521             if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3522                 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3523                 fsr = arm_fi_to_lfsc(&fi);
3524                 fsc = extract32(fsr, 0, 6);
3525             } else {
3526                 fsr = arm_fi_to_sfsc(&fi);
3527                 fsc = 0x3f;
3528             }
3529             /*
3530              * Report exception with ESR indicating a fault due to a
3531              * translation table walk for a cache maintenance instruction.
3532              */
3533             syn = syn_data_abort_no_iss(current_el == target_el, 0,
3534                                         fi.ea, 1, fi.s1ptw, 1, fsc);
3535             env->exception.vaddress = value;
3536             env->exception.fsr = fsr;
3537             raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3538         }
3539     }
3540 
3541     if (is_a64(env)) {
3542         format64 = true;
3543     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64-bit format
         *
         * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64-bit format.
         */
3558         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3559 
3560         if (arm_feature(env, ARM_FEATURE_EL2)) {
3561             if (mmu_idx == ARMMMUIdx_E10_0 ||
3562                 mmu_idx == ARMMMUIdx_E10_1 ||
3563                 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3564                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3565             } else {
3566                 format64 |= arm_current_el(env) == 2;
3567             }
3568         }
3569     }
3570 
3571     if (format64) {
3572         /* Create a 64-bit PAR */
3573         par64 = (1 << 11); /* LPAE bit always set */
3574         if (!ret) {
3575             par64 |= res.f.phys_addr & ~0xfffULL;
3576             if (!res.f.attrs.secure) {
3577                 par64 |= (1 << 9); /* NS */
3578             }
3579             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
3580             par64 |= par_el1_shareability(&res) << 7; /* SH */
3581         } else {
3582             uint32_t fsr = arm_fi_to_lfsc(&fi);
3583 
3584             par64 |= 1; /* F */
3585             par64 |= (fsr & 0x3f) << 1; /* FS */
3586             if (fi.stage2) {
3587                 par64 |= (1 << 9); /* S */
3588             }
3589             if (fi.s1ptw) {
3590                 par64 |= (1 << 8); /* PTW */
3591             }
3592         }
3593     } else {
3594         /*
3595          * fsr is a DFSR/IFSR value for the short descriptor
3596          * translation table format (with WnR always clear).
3597          * Convert it to a 32-bit PAR.
3598          */
3599         if (!ret) {
3600             /* We do not set any attribute bits in the PAR */
3601             if (res.f.lg_page_size == 24
3602                 && arm_feature(env, ARM_FEATURE_V7)) {
3603                 par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
3604             } else {
3605                 par64 = res.f.phys_addr & 0xfffff000;
3606             }
3607             if (!res.f.attrs.secure) {
3608                 par64 |= (1 << 9); /* NS */
3609             }
3610         } else {
3611             uint32_t fsr = arm_fi_to_sfsc(&fi);
3612 
3613             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3614                     ((fsr & 0xf) << 1) | 1;
3615         }
3616     }
3617     return par64;
3618 }
3619 #endif /* CONFIG_TCG */
3620 
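/*
 * Implement the AArch32 ATS1C* and ATS12NSO* operations: decode the
 * requested translation regime from opc2 and the current EL, do the
 * translation, and store the result in the (banked) 32-bit PAR.
 */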
3621 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3622 {
3623 #ifdef CONFIG_TCG
3624     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3625     uint64_t par64;
3626     ARMMMUIdx mmu_idx;
3627     int el = arm_current_el(env);
3628     ARMSecuritySpace ss = arm_security_space(env);
3629 
3630     switch (ri->opc2 & 6) {
3631     case 0:
3632         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3633         switch (el) {
3634         case 3:
3635             mmu_idx = ARMMMUIdx_E3;
3636             break;
3637         case 2:
3638             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3639             /* fall through */
3640         case 1:
3641             if (ri->crm == 9 && arm_pan_enabled(env)) {
3642                 mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3643             } else {
3644                 mmu_idx = ARMMMUIdx_Stage1_E1;
3645             }
3646             break;
3647         default:
3648             g_assert_not_reached();
3649         }
3650         break;
3651     case 2:
3652         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3653         switch (el) {
3654         case 3:
3655             mmu_idx = ARMMMUIdx_E10_0;
3656             break;
3657         case 2:
3658             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3659             mmu_idx = ARMMMUIdx_Stage1_E0;
3660             break;
3661         case 1:
3662             mmu_idx = ARMMMUIdx_Stage1_E0;
3663             break;
3664         default:
3665             g_assert_not_reached();
3666         }
3667         break;
3668     case 4:
3669         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3670         mmu_idx = ARMMMUIdx_E10_1;
3671         ss = ARMSS_NonSecure;
3672         break;
3673     case 6:
3674         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3675         mmu_idx = ARMMMUIdx_E10_0;
3676         ss = ARMSS_NonSecure;
3677         break;
3678     default:
3679         g_assert_not_reached();
3680     }
3681 
3682     par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
3683 
3684     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3685 #else
3686     /* Handled by hardware accelerator. */
3687     g_assert_not_reached();
3688 #endif /* CONFIG_TCG */
3689 }
3690 
3691 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3692                         uint64_t value)
3693 {
3694 #ifdef CONFIG_TCG
3695     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3696     uint64_t par64;
3697 
    /* There is no Secure EL2 for AArch32. */
3699     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
3700                          ARMSS_NonSecure);
3701 
3702     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3703 #else
3704     /* Handled by hardware accelerator. */
3705     g_assert_not_reached();
3706 #endif /* CONFIG_TCG */
3707 }
3708 
3709 static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
3710                                      bool isread)
3711 {
3712     /*
3713      * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
3714      * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
3715      * only happen when executing at EL3 because that combination also causes an
3716      * illegal exception return. We don't need to check FEAT_RME either, because
3717      * scr_write() ensures that the NSE bit is not set otherwise.
3718      */
3719     if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
3720         return CP_ACCESS_TRAP;
3721     }
3722     return CP_ACCESS_OK;
3723 }
3724 
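/* AT S1E2* UNDEF at EL3 if EL2 is not enabled in the current security state. */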
3725 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3726                                      bool isread)
3727 {
3728     if (arm_current_el(env) == 3 &&
3729         !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3730         return CP_ACCESS_TRAP;
3731     }
3732     return at_e012_access(env, ri, isread);
3733 }
3734 
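/* AT S1E0* and AT S1E1* from EL1 trap to EL2 when HCR_EL2.AT is set. */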
3735 static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
3736                                       bool isread)
3737 {
3738     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
3739         return CP_ACCESS_TRAP_EL2;
3740     }
3741     return at_e012_access(env, ri, isread);
3742 }
3743 
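/*
 * Implement the AArch64 AT S1E0*, S1E1*, S1E2*, S1E3* and AT S12E*
 * operations: the regime depends on opc1/opc2 and on whether the
 * EL2&0 (HCR_EL2.{E2H,TGE} == {1,1}) regime is active; the result
 * always lands in PAR_EL1.
 */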
3744 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3745                         uint64_t value)
3746 {
3747 #ifdef CONFIG_TCG
3748     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3749     ARMMMUIdx mmu_idx;
3750     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
3751     bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
3752 
3753     switch (ri->opc2 & 6) {
3754     case 0:
3755         switch (ri->opc1) {
3756         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3757             if (ri->crm == 9 && arm_pan_enabled(env)) {
3758                 mmu_idx = regime_e20 ?
3759                           ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
3760             } else {
3761                 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
3762             }
3763             break;
3764         case 4: /* AT S1E2R, AT S1E2W */
3765             mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
3766             break;
3767         case 6: /* AT S1E3R, AT S1E3W */
3768             mmu_idx = ARMMMUIdx_E3;
3769             break;
3770         default:
3771             g_assert_not_reached();
3772         }
3773         break;
3774     case 2: /* AT S1E0R, AT S1E0W */
3775         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
3776         break;
3777     case 4: /* AT S12E1R, AT S12E1W */
3778         mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
3779         break;
3780     case 6: /* AT S12E0R, AT S12E0W */
3781         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
3782         break;
3783     default:
3784         g_assert_not_reached();
3785     }
3786 
3787     env->cp15.par_el[1] = do_ats_write(env, value, access_type,
3788                                        mmu_idx, arm_security_space(env));
3789 #else
3790     /* Handled by hardware accelerator. */
3791     g_assert_not_reached();
3792 #endif /* CONFIG_TCG */
3793 }
3794 #endif
3795 
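/*
 * PMSAv5 holds the data/insn access permissions as one 4-bit field per
 * region in the "extended" registers; the legacy "simple" view exposes
 * only the low 2 bits of each field. For example, an extended value of
 * 0x31 (region 0 AP = 1, region 1 AP = 3) reads back via the simple
 * view as 0xd, and writing 0xd through the simple view stores 0x31.
 */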
3796 /* Return basic MPU access permission bits.  */
3797 static uint32_t simple_mpu_ap_bits(uint32_t val)
3798 {
3799     uint32_t ret;
3800     uint32_t mask;
3801     int i;
3802     ret = 0;
3803     mask = 3;
3804     for (i = 0; i < 16; i += 2) {
3805         ret |= (val >> i) & mask;
3806         mask <<= 2;
3807     }
3808     return ret;
3809 }
3810 
3811 /* Pad basic MPU access permission bits to extended format.  */
3812 static uint32_t extended_mpu_ap_bits(uint32_t val)
3813 {
3814     uint32_t ret;
3815     uint32_t mask;
3816     int i;
3817     ret = 0;
3818     mask = 3;
3819     for (i = 0; i < 16; i += 2) {
3820         ret |= (val & mask) << i;
3821         mask <<= 2;
3822     }
3823     return ret;
3824 }
3825 
3826 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3827                                  uint64_t value)
3828 {
3829     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3830 }
3831 
3832 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3833 {
3834     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3835 }
3836 
3837 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3838                                  uint64_t value)
3839 {
3840     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3841 }
3842 
3843 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3844 {
3845     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3846 }
3847 
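/*
 * PMSAv7 DRBAR/DRSR/DRACR are views onto whichever MPU region RGNR
 * currently selects. The regdef's fieldoffset points at a pointer to
 * the dynamically allocated per-region array (NULL when the CPU has
 * no MPU regions), which these accessors index by RGNR.
 */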
3848 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3849 {
3850     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3851 
3852     if (!u32p) {
3853         return 0;
3854     }
3855 
3856     u32p += env->pmsav7.rnr[M_REG_NS];
3857     return *u32p;
3858 }
3859 
3860 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3861                          uint64_t value)
3862 {
3863     ARMCPU *cpu = env_archcpu(env);
3864     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3865 
3866     if (!u32p) {
3867         return;
3868     }
3869 
3870     u32p += env->pmsav7.rnr[M_REG_NS];
3871     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3872     *u32p = value;
3873 }
3874 
3875 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3876                               uint64_t value)
3877 {
3878     ARMCPU *cpu = env_archcpu(env);
3879     uint32_t nrgs = cpu->pmsav7_dregion;
3880 
3881     if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3885         return;
3886     }
3887 
3888     raw_write(env, ri, value);
3889 }
3890 
3891 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3892                           uint64_t value)
3893 {
3894     ARMCPU *cpu = env_archcpu(env);
3895 
3896     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3897     env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3898 }
3899 
3900 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3901 {
3902     return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3903 }
3904 
3905 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3906                           uint64_t value)
3907 {
3908     ARMCPU *cpu = env_archcpu(env);
3909 
3910     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3911     env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3912 }
3913 
3914 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3915 {
3916     return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3917 }
3918 
3919 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3920                            uint64_t value)
3921 {
3922     ARMCPU *cpu = env_archcpu(env);
3923 
    /*
     * Ignore writes that would select a region which is not implemented.
     * This is architecturally UNPREDICTABLE.
     */
3928     if (value >= cpu->pmsav7_dregion) {
3929         return;
3930     }
3931 
3932     env->pmsav7.rnr[M_REG_NS] = value;
3933 }
3934 
3935 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3936                           uint64_t value)
3937 {
3938     ARMCPU *cpu = env_archcpu(env);
3939 
3940     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3941     env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
3942 }
3943 
3944 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3945 {
3946     return env->pmsav8.hprbar[env->pmsav8.hprselr];
3947 }
3948 
3949 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3950                           uint64_t value)
3951 {
3952     ARMCPU *cpu = env_archcpu(env);
3953 
3954     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3955     env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
3956 }
3957 
3958 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3959 {
3960     return env->pmsav8.hprlar[env->pmsav8.hprselr];
3961 }
3962 
3963 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3964                           uint64_t value)
3965 {
3966     uint32_t n;
3967     uint32_t bit;
3968     ARMCPU *cpu = env_archcpu(env);
3969 
3970     /* Ignore writes to unimplemented regions */
3971     int rmax = MIN(cpu->pmsav8r_hdregion, 32);
3972     value &= MAKE_64BIT_MASK(0, rmax);
3973 
3974     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3975 
    /* Register alias is only valid for the first 32 indexes */
3977     for (n = 0; n < rmax; ++n) {
3978         bit = extract32(value, n, 1);
3979         env->pmsav8.hprlar[n] = deposit32(
3980                     env->pmsav8.hprlar[n], 0, 1, bit);
3981     }
3982 }
3983 
3984 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3985 {
3986     uint32_t n;
3987     uint32_t result = 0x0;
3988     ARMCPU *cpu = env_archcpu(env);
3989 
    /* Register alias is only valid for the first 32 indexes */
3991     for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
3992         if (env->pmsav8.hprlar[n] & 0x1) {
3993             result |= (0x1 << n);
3994         }
3995     }
3996     return result;
3997 }
3998 
3999 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4000                            uint64_t value)
4001 {
4002     ARMCPU *cpu = env_archcpu(env);
4003 
    /*
     * Ignore writes that would select a region which is not implemented.
     * This is architecturally UNPREDICTABLE.
     */
4008     if (value >= cpu->pmsav8r_hdregion) {
4009         return;
4010     }
4011 
4012     env->pmsav8.hprselr = value;
4013 }
4014 
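/*
 * PMSAv8-R PRBAR<n>/PRLAR<n> (and the HPR variants) encode the region
 * number n in the register encoding itself; reconstruct it from the
 * regdef's opc0/crm/opc2 fields. Writes to regions the CPU does not
 * implement are ignored, as for PRSELR/HPRSELR above.
 */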
4015 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
4016                           uint64_t value)
4017 {
4018     ARMCPU *cpu = env_archcpu(env);
4019     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4020                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4021 
4022     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4023 
4024     if (ri->opc1 & 4) {
4025         if (index >= cpu->pmsav8r_hdregion) {
4026             return;
4027         }
4028         if (ri->opc2 & 0x1) {
4029             env->pmsav8.hprlar[index] = value;
4030         } else {
4031             env->pmsav8.hprbar[index] = value;
4032         }
4033     } else {
4034         if (index >= cpu->pmsav7_dregion) {
4035             return;
4036         }
4037         if (ri->opc2 & 0x1) {
4038             env->pmsav8.rlar[M_REG_NS][index] = value;
4039         } else {
4040             env->pmsav8.rbar[M_REG_NS][index] = value;
4041         }
4042     }
4043 }
4044 
4045 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
4046 {
4047     ARMCPU *cpu = env_archcpu(env);
4048     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4049                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4050 
4051     if (ri->opc1 & 4) {
4052         if (index >= cpu->pmsav8r_hdregion) {
4053             return 0x0;
4054         }
4055         if (ri->opc2 & 0x1) {
4056             return env->pmsav8.hprlar[index];
4057         } else {
4058             return env->pmsav8.hprbar[index];
4059         }
4060     } else {
4061         if (index >= cpu->pmsav7_dregion) {
4062             return 0x0;
4063         }
4064         if (ri->opc2 & 0x1) {
4065             return env->pmsav8.rlar[M_REG_NS][index];
4066         } else {
4067             return env->pmsav8.rbar[M_REG_NS][index];
4068         }
4069     }
4070 }
4071 
4072 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
4073     { .name = "PRBAR",
4074       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
4075       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4076       .accessfn = access_tvm_trvm,
4077       .readfn = prbar_read, .writefn = prbar_write },
4078     { .name = "PRLAR",
4079       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
4080       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4081       .accessfn = access_tvm_trvm,
4082       .readfn = prlar_read, .writefn = prlar_write },
4083     { .name = "PRSELR", .resetvalue = 0,
4084       .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
4085       .access = PL1_RW, .accessfn = access_tvm_trvm,
4086       .writefn = prselr_write,
4087       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
4088     { .name = "HPRBAR", .resetvalue = 0,
4089       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
4090       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4091       .readfn = hprbar_read, .writefn = hprbar_write },
4092     { .name = "HPRLAR",
4093       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
4094       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4095       .readfn = hprlar_read, .writefn = hprlar_write },
4096     { .name = "HPRSELR", .resetvalue = 0,
4097       .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
4098       .access = PL2_RW,
4099       .writefn = hprselr_write,
4100       .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
4101     { .name = "HPRENR",
4102       .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
4103       .access = PL2_RW, .type = ARM_CP_NO_RAW,
4104       .readfn = hprenr_read, .writefn = hprenr_write },
4105 };
4106 
4107 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
4108     /*
4109      * Reset for all these registers is handled in arm_cpu_reset(),
4110      * because the PMSAv7 is also used by M-profile CPUs, which do
4111      * not register cpregs but still need the state to be reset.
4112      */
4113     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4114       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4115       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
4116       .readfn = pmsav7_read, .writefn = pmsav7_write,
4117       .resetfn = arm_cp_reset_ignore },
4118     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4119       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4120       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
4121       .readfn = pmsav7_read, .writefn = pmsav7_write,
4122       .resetfn = arm_cp_reset_ignore },
4123     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4124       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4125       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
4126       .readfn = pmsav7_read, .writefn = pmsav7_write,
4127       .resetfn = arm_cp_reset_ignore },
4128     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4129       .access = PL1_RW,
4130       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
4131       .writefn = pmsav7_rgnr_write,
4132       .resetfn = arm_cp_reset_ignore },
4133 };
4134 
4135 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
4136     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4137       .access = PL1_RW, .type = ARM_CP_ALIAS,
4138       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4139       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
4140     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4141       .access = PL1_RW, .type = ARM_CP_ALIAS,
4142       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4143       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
4144     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4145       .access = PL1_RW,
4146       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4147       .resetvalue = 0, },
4148     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4149       .access = PL1_RW,
4150       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4151       .resetvalue = 0, },
4152     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4153       .access = PL1_RW,
4154       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
4155     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4156       .access = PL1_RW,
4157       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
4158     /* Protection region base and size registers */
4159     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4160       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4161       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
4162     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4163       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4164       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
4165     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4166       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4167       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
4168     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4169       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4170       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
4171     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4172       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4173       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
4174     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4175       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4176       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
4177     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4178       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4179       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4180     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4181       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4182       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
4183 };
4184 
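/*
 * TTBCR writes need extra care: pre-v8 CPUs treat various bits as
 * UNK/SBZP depending on the translation table format and on whether
 * the Security Extensions are implemented, and with LPAE a write can
 * change the active ASID via TTBCR.A1, so we must flush the TLB.
 */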
4185 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4186                              uint64_t value)
4187 {
4188     ARMCPU *cpu = env_archcpu(env);
4189 
4190     if (!arm_feature(env, ARM_FEATURE_V8)) {
4191         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
4192             /*
4193              * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4194              * using Long-descriptor translation table format
4195              */
4196             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
4197         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
4198             /*
4199              * In an implementation that includes the Security Extensions
4200              * TTBCR has additional fields PD0 [4] and PD1 [5] for
4201              * Short-descriptor translation table format.
4202              */
4203             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
4204         } else {
4205             value &= TTBCR_N;
4206         }
4207     }
4208 
4209     if (arm_feature(env, ARM_FEATURE_LPAE)) {
4210         /*
4211          * With LPAE the TTBCR could result in a change of ASID
4212          * via the TTBCR.A1 bit, so do a TLB flush.
4213          */
4214         tlb_flush(CPU(cpu));
4215     }
4216     raw_write(env, ri, value);
4217 }
4218 
4219 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4220                                uint64_t value)
4221 {
4222     ARMCPU *cpu = env_archcpu(env);
4223 
4224     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
4225     tlb_flush(CPU(cpu));
4226     raw_write(env, ri, value);
4227 }
4228 
4229 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4230                             uint64_t value)
4231 {
4232     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
4233     if (cpreg_field_is_64bit(ri) &&
4234         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4235         ARMCPU *cpu = env_archcpu(env);
4236         tlb_flush(CPU(cpu));
4237     }
4238     raw_write(env, ri, value);
4239 }
4240 
4241 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4242                                     uint64_t value)
4243 {
    /*
     * If we are running with the EL2&0 regime, then an ASID is active.
     * Flush if it might be changing. Note we're not checking
     * TCR_EL2.A1 to know whether this is really the TTBRx_EL2 that
     * holds the active ASID; we only check the field that might.
     */
4250     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4251         (arm_hcr_el2_eff(env) & HCR_E2H)) {
4252         uint16_t mask = ARMMMUIdxBit_E20_2 |
4253                         ARMMMUIdxBit_E20_2_PAN |
4254                         ARMMMUIdxBit_E20_0;
4255         tlb_flush_by_mmuidx(env_cpu(env), mask);
4256     }
4257     raw_write(env, ri, value);
4258 }
4259 
4260 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4261                         uint64_t value)
4262 {
4263     ARMCPU *cpu = env_archcpu(env);
4264     CPUState *cs = CPU(cpu);
4265 
    /*
     * A change of VMID in the stage 2 page table base invalidates
     * the stage 2 TLB and the combined stage 1&2 TLBs (the E10_1
     * and E10_0 regimes).
     */
4270     if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4271         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
4272     }
4273     raw_write(env, ri, value);
4274 }
4275 
4276 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4277     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4278       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4279       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4280                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4281     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4282       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4283       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4284                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4285     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4286       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4287       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4288                              offsetof(CPUARMState, cp15.dfar_ns) } },
4289     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4290       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4291       .access = PL1_RW, .accessfn = access_tvm_trvm,
4292       .fgt = FGT_FAR_EL1,
4293       .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
4294       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4295       .resetvalue = 0, },
4296 };
4297 
4298 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4299     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4300       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4301       .access = PL1_RW, .accessfn = access_tvm_trvm,
4302       .fgt = FGT_ESR_EL1,
4303       .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
4304       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4305     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4306       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4307       .access = PL1_RW, .accessfn = access_tvm_trvm,
4308       .fgt = FGT_TTBR0_EL1,
4309       .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
4310       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4311       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4312                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
4313     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4314       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4315       .access = PL1_RW, .accessfn = access_tvm_trvm,
4316       .fgt = FGT_TTBR1_EL1,
4317       .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
4318       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4319       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4320                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
4321     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4322       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4323       .access = PL1_RW, .accessfn = access_tvm_trvm,
4324       .fgt = FGT_TCR_EL1,
4325       .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
4326       .writefn = vmsa_tcr_el12_write,
4327       .raw_writefn = raw_write,
4328       .resetvalue = 0,
4329       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4330     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4331       .access = PL1_RW, .accessfn = access_tvm_trvm,
4332       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4333       .raw_writefn = raw_write,
4334       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4335                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4336 };
4337 
/*
 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * the QEMU TLBs or adjusting cached masks.
 */
4342 static const ARMCPRegInfo ttbcr2_reginfo = {
4343     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4344     .access = PL1_RW, .accessfn = access_tvm_trvm,
4345     .type = ARM_CP_ALIAS,
4346     .bank_fieldoffsets = {
4347         offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4348         offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
4349     },
4350 };
4351 
4352 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4353                                 uint64_t value)
4354 {
4355     env->cp15.c15_ticonfig = value & 0xe7;
4356     /* The OS_TYPE bit in this register changes the reported CPUID! */
4357     env->cp15.c0_cpuid = (value & (1 << 5)) ?
4358         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4359 }
4360 
4361 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4362                                 uint64_t value)
4363 {
4364     env->cp15.c15_threadid = value & 0xffff;
4365 }
4366 
4367 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4368                            uint64_t value)
4369 {
4370     /* Wait-for-interrupt (deprecated) */
4371     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4372 }
4373 
4374 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4375                                   uint64_t value)
4376 {
4377     /*
4378      * On OMAP there are registers indicating the max/min index of dcache lines
4379      * containing a dirty line; cache flush operations have to reset these.
4380      */
4381     env->cp15.c15_i_max = 0x000;
4382     env->cp15.c15_i_min = 0xff0;
4383 }
4384 
4385 static const ARMCPRegInfo omap_cp_reginfo[] = {
4386     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4387       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4388       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4389       .resetvalue = 0, },
4390     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4391       .access = PL1_RW, .type = ARM_CP_NOP },
4392     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4393       .access = PL1_RW,
4394       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4395       .writefn = omap_ticonfig_write },
4396     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4397       .access = PL1_RW,
4398       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4399     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4400       .access = PL1_RW, .resetvalue = 0xff0,
4401       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4402     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4403       .access = PL1_RW,
4404       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4405       .writefn = omap_threadid_write },
4406     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4407       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4408       .type = ARM_CP_NO_RAW,
4409       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4410     /*
4411      * TODO: Peripheral port remap register:
4412      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4413      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4414      * when MMU is off.
4415      */
4416     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4417       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4418       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4419       .writefn = omap_cachemaint_write },
4420     { .name = "C9", .cp = 15, .crn = 9,
4421       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4422       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4423 };
4424 
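/*
 * The XScale Coprocessor Access Register gates access to coprocessors
 * CP0..CP13 via bits [13:0]; the write handler below masks off the
 * remaining (reserved) upper bits.
 */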
4425 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4426                               uint64_t value)
4427 {
4428     env->cp15.c15_cpar = value & 0x3fff;
4429 }
4430 
4431 static const ARMCPRegInfo xscale_cp_reginfo[] = {
4432     { .name = "XSCALE_CPAR",
4433       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4434       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4435       .writefn = xscale_cpar_write, },
4436     { .name = "XSCALE_AUXCR",
4437       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4438       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4439       .resetvalue = 0, },
4440     /*
4441      * XScale-specific cache lockdown: since we have no cache we NOP these
4442      * and hope the guest does not really rely on cache behaviour.
4443      */
4444     { .name = "XSCALE_LOCK_ICACHE_LINE",
4445       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4446       .access = PL1_W, .type = ARM_CP_NOP },
4447     { .name = "XSCALE_UNLOCK_ICACHE",
4448       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4449       .access = PL1_W, .type = ARM_CP_NOP },
4450     { .name = "XSCALE_DCACHE_LOCK",
4451       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4452       .access = PL1_RW, .type = ARM_CP_NOP },
4453     { .name = "XSCALE_UNLOCK_DCACHE",
4454       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4455       .access = PL1_W, .type = ARM_CP_NOP },
4456 };
4457 
4458 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4459     /*
4460      * RAZ/WI the whole crn=15 space, when we don't have a more specific
4461      * implementation of this implementation-defined space.
4462      * Ideally this should eventually disappear in favour of actually
4463      * implementing the correct behaviour for all cores.
4464      */
4465     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4466       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4467       .access = PL1_RW,
4468       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4469       .resetvalue = 0 },
4470 };
4471 
4472 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4473     /* Cache status: RAZ because we have no cache so it's always clean */
4474     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4475       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4476       .resetvalue = 0 },
4477 };
4478 
4479 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4480     /* We never have a block transfer operation in progress */
4481     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4482       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4483       .resetvalue = 0 },
4484     /* The cache ops themselves: these all NOP for QEMU */
4485     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4486       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4487     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4488       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4489     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4490       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4491     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4492       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4493     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4494       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4495     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4496       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4497 };
4498 
4499 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4500     /*
4501      * The cache test-and-clean instructions always return (1 << 30)
4502      * to indicate that there are no dirty cache lines.
4503      */
4504     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4505       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4506       .resetvalue = (1 << 30) },
4507     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4508       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4509       .resetvalue = (1 << 30) },
4510 };
4511 
4512 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4513     /* Ignore ReadBuffer accesses */
4514     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4515       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4516       .access = PL1_RW, .resetvalue = 0,
4517       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4518 };
4519 
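/*
 * MIDR reads are subject to virtualization: when EL2 is enabled, an
 * EL1 read returns VPIDR_EL2 rather than the real MIDR value.
 */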
4520 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4521 {
4522     unsigned int cur_el = arm_current_el(env);
4523 
4524     if (arm_is_el2_enabled(env) && cur_el == 1) {
4525         return env->cp15.vpidr_el2;
4526     }
4527     return raw_read(env, ri);
4528 }
4529 
4530 static uint64_t mpidr_read_val(CPUARMState *env)
4531 {
4532     ARMCPU *cpu = env_archcpu(env);
4533     uint64_t mpidr = cpu->mp_affinity;
4534 
4535     if (arm_feature(env, ARM_FEATURE_V7MP)) {
4536         mpidr |= (1U << 31);
4537         /*
4538          * Cores which are uniprocessor (non-coherent)
4539          * but still implement the MP extensions set
4540          * bit 30. (For instance, Cortex-R5).
4541          */
4542         if (cpu->mp_is_up) {
4543             mpidr |= (1u << 30);
4544         }
4545     }
4546     return mpidr;
4547 }
4548 
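/* As for MIDR: an EL1 read with EL2 enabled sees VMPIDR_EL2 instead. */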
4549 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4550 {
4551     unsigned int cur_el = arm_current_el(env);
4552 
4553     if (arm_is_el2_enabled(env) && cur_el == 1) {
4554         return env->cp15.vmpidr_el2;
4555     }
4556     return mpidr_read_val(env);
4557 }
4558 
4559 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4560     /* NOP AMAIR0/1 */
4561     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4562       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4563       .access = PL1_RW, .accessfn = access_tvm_trvm,
4564       .fgt = FGT_AMAIR_EL1,
4565       .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
4566       .type = ARM_CP_CONST, .resetvalue = 0 },
4567     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4568     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4569       .access = PL1_RW, .accessfn = access_tvm_trvm,
4570       .type = ARM_CP_CONST, .resetvalue = 0 },
4571     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4572       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4573       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4574                              offsetof(CPUARMState, cp15.par_ns)} },
4575     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4576       .access = PL1_RW, .accessfn = access_tvm_trvm,
4577       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4578       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4579                              offsetof(CPUARMState, cp15.ttbr0_ns) },
4580       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4581     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4582       .access = PL1_RW, .accessfn = access_tvm_trvm,
4583       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4584       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4585                              offsetof(CPUARMState, cp15.ttbr1_ns) },
4586       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4587 };
4588 
4589 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4590 {
4591     return vfp_get_fpcr(env);
4592 }
4593 
4594 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4595                             uint64_t value)
4596 {
4597     vfp_set_fpcr(env, value);
4598 }
4599 
4600 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4601 {
4602     return vfp_get_fpsr(env);
4603 }
4604 
4605 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4606                             uint64_t value)
4607 {
4608     vfp_set_fpsr(env, value);
4609 }
4610 
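/*
 * EL0 accesses to DAIF are trapped unless the UMA bit is set in the
 * SCTLR for the translation regime that applies to EL0.
 */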
4611 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4612                                        bool isread)
4613 {
4614     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4615         return CP_ACCESS_TRAP;
4616     }
4617     return CP_ACCESS_OK;
4618 }
4619 
4620 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4621                             uint64_t value)
4622 {
4623     env->daif = value & PSTATE_DAIF;
4624 }
4625 
4626 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4627 {
4628     return env->pstate & PSTATE_PAN;
4629 }
4630 
4631 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4632                            uint64_t value)
4633 {
4634     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4635 }
4636 
4637 static const ARMCPRegInfo pan_reginfo = {
4638     .name = "PAN", .state = ARM_CP_STATE_AA64,
4639     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4640     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4641     .readfn = aa64_pan_read, .writefn = aa64_pan_write
4642 };
4643 
4644 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4645 {
4646     return env->pstate & PSTATE_UAO;
4647 }
4648 
4649 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4650                            uint64_t value)
4651 {
4652     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4653 }
4654 
4655 static const ARMCPRegInfo uao_reginfo = {
4656     .name = "UAO", .state = ARM_CP_STATE_AA64,
4657     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4658     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4659     .readfn = aa64_uao_read, .writefn = aa64_uao_write
4660 };
4661 
4662 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4663 {
4664     return env->pstate & PSTATE_DIT;
4665 }
4666 
4667 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4668                            uint64_t value)
4669 {
4670     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4671 }
4672 
4673 static const ARMCPRegInfo dit_reginfo = {
4674     .name = "DIT", .state = ARM_CP_STATE_AA64,
4675     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4676     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4677     .readfn = aa64_dit_read, .writefn = aa64_dit_write
4678 };
4679 
4680 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4681 {
4682     return env->pstate & PSTATE_SSBS;
4683 }
4684 
4685 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4686                            uint64_t value)
4687 {
4688     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4689 }
4690 
4691 static const ARMCPRegInfo ssbs_reginfo = {
4692     .name = "SSBS", .state = ARM_CP_STATE_AA64,
4693     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4694     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4695     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4696 };
4697 
4698 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4699                                               const ARMCPRegInfo *ri,
4700                                               bool isread)
4701 {
4702     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4703     switch (arm_current_el(env)) {
4704     case 0:
4705         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4706         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4707             return CP_ACCESS_TRAP;
4708         }
4709         /* fall through */
4710     case 1:
4711         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4712         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4713             return CP_ACCESS_TRAP_EL2;
4714         }
4715         break;
4716     }
4717     return CP_ACCESS_OK;
4718 }
4719 
4720 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
4721 {
4722     /* Cache invalidate/clean to Point of Unification... */
4723     switch (arm_current_el(env)) {
4724     case 0:
4725         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4726         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4727             return CP_ACCESS_TRAP;
4728         }
4729         /* fall through */
4730     case 1:
4731         /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
4732         if (arm_hcr_el2_eff(env) & hcrflags) {
4733             return CP_ACCESS_TRAP_EL2;
4734         }
4735         break;
4736     }
4737     return CP_ACCESS_OK;
4738 }
4739 
4740 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
4741                                    bool isread)
4742 {
4743     return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
4744 }
4745 
4746 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
4747                                   bool isread)
4748 {
4749     return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
4750 }
4751 
4752 /*
4753  * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4754  * Page D4-1736 (DDI0487A.b)
4755  */
4756 
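/*
 * Mask of mmu_idx values that a VMALLE1/VAE1-type TLBI must flush:
 * when HCR_EL2.{E2H,TGE} == {1,1} the "EL1&0" operations apply to the
 * EL2&0 regime, otherwise to the EL1&0 regime.
 */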
4757 static int vae1_tlbmask(CPUARMState *env)
4758 {
4759     uint64_t hcr = arm_hcr_el2_eff(env);
4760     uint16_t mask;
4761 
4762     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4763         mask = ARMMMUIdxBit_E20_2 |
4764                ARMMMUIdxBit_E20_2_PAN |
4765                ARMMMUIdxBit_E20_0;
4766     } else {
4767         mask = ARMMMUIdxBit_E10_1 |
4768                ARMMMUIdxBit_E10_1_PAN |
4769                ARMMMUIdxBit_E10_0;
4770     }
4771     return mask;
4772 }
4773 
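/* As vae1_tlbmask, but for EL2: HCR_EL2.E2H selects EL2&0 vs plain EL2. */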
4774 static int vae2_tlbmask(CPUARMState *env)
4775 {
4776     uint64_t hcr = arm_hcr_el2_eff(env);
4777     uint16_t mask;
4778 
4779     if (hcr & HCR_E2H) {
4780         mask = ARMMMUIdxBit_E20_2 |
4781                ARMMMUIdxBit_E20_2_PAN |
4782                ARMMMUIdxBit_E20_0;
4783     } else {
4784         mask = ARMMMUIdxBit_E2;
4785     }
4786     return mask;
4787 }
4788 
4789 /* Return 56 if TBI is enabled, 64 otherwise. */
4790 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4791                               uint64_t addr)
4792 {
4793     uint64_t tcr = regime_tcr(env, mmu_idx);
4794     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4795     int select = extract64(addr, 55, 1);
4796 
4797     return (tbi >> select) & 1 ? 56 : 64;
4798 }
4799 
4800 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4801 {
4802     uint64_t hcr = arm_hcr_el2_eff(env);
4803     ARMMMUIdx mmu_idx;
4804 
4805     /* Only the regime of the mmu_idx below is significant. */
4806     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4807         mmu_idx = ARMMMUIdx_E20_0;
4808     } else {
4809         mmu_idx = ARMMMUIdx_E10_0;
4810     }
4811 
4812     return tlbbits_for_regime(env, mmu_idx, addr);
4813 }
4814 
4815 static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
4816 {
4817     uint64_t hcr = arm_hcr_el2_eff(env);
4818     ARMMMUIdx mmu_idx;
4819 
4820     /*
4821      * Only the regime of the mmu_idx below is significant.
4822      * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
4823      * only has one.
4824      */
4825     if (hcr & HCR_E2H) {
4826         mmu_idx = ARMMMUIdx_E20_2;
4827     } else {
4828         mmu_idx = ARMMMUIdx_E2;
4829     }
4830 
4831     return tlbbits_for_regime(env, mmu_idx, addr);
4832 }
4833 
4834 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4835                                       uint64_t value)
4836 {
4837     CPUState *cs = env_cpu(env);
4838     int mask = vae1_tlbmask(env);
4839 
4840     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4841 }
4842 
4843 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4844                                     uint64_t value)
4845 {
4846     CPUState *cs = env_cpu(env);
4847     int mask = vae1_tlbmask(env);
4848 
4849     if (tlb_force_broadcast(env)) {
4850         tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4851     } else {
4852         tlb_flush_by_mmuidx(cs, mask);
4853     }
4854 }
4855 
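/* ALLE2 flushes everything EL2-related: both the EL2 and EL2&0 regimes. */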
4856 static int e2_tlbmask(CPUARMState *env)
4857 {
4858     return (ARMMMUIdxBit_E20_0 |
4859             ARMMMUIdxBit_E20_2 |
4860             ARMMMUIdxBit_E20_2_PAN |
4861             ARMMMUIdxBit_E2);
4862 }
4863 
4864 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4865                                   uint64_t value)
4866 {
4867     CPUState *cs = env_cpu(env);
4868     int mask = alle1_tlbmask(env);
4869 
4870     tlb_flush_by_mmuidx(cs, mask);
4871 }
4872 
4873 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4874                                   uint64_t value)
4875 {
4876     CPUState *cs = env_cpu(env);
4877     int mask = e2_tlbmask(env);
4878 
4879     tlb_flush_by_mmuidx(cs, mask);
4880 }
4881 
4882 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4883                                   uint64_t value)
4884 {
4885     ARMCPU *cpu = env_archcpu(env);
4886     CPUState *cs = CPU(cpu);
4887 
4888     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
4889 }
4890 
4891 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4892                                     uint64_t value)
4893 {
4894     CPUState *cs = env_cpu(env);
4895     int mask = alle1_tlbmask(env);
4896 
4897     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4898 }
4899 
4900 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4901                                     uint64_t value)
4902 {
4903     CPUState *cs = env_cpu(env);
4904     int mask = e2_tlbmask(env);
4905 
4906     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4907 }
4908 
4909 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4910                                     uint64_t value)
4911 {
4912     CPUState *cs = env_cpu(env);
4913 
4914     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
4915 }
4916 
4917 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4918                                  uint64_t value)
4919 {
4920     /*
4921      * Invalidate by VA, EL2
4922      * Currently handles both VAE2 and VALE2, since we don't support
4923      * flush-last-level-only.
4924      */
4925     CPUState *cs = env_cpu(env);
4926     int mask = vae2_tlbmask(env);
4927     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4928     int bits = vae2_tlbbits(env, pageaddr);
4929 
4930     tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4931 }
4932 
4933 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4934                                  uint64_t value)
4935 {
4936     /*
4937      * Invalidate by VA, EL3
4938      * Currently handles both VAE3 and VALE3, since we don't support
4939      * flush-last-level-only.
4940      */
4941     ARMCPU *cpu = env_archcpu(env);
4942     CPUState *cs = CPU(cpu);
4943     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4944 
4945     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
4946 }
4947 
4948 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4949                                    uint64_t value)
4950 {
4951     CPUState *cs = env_cpu(env);
4952     int mask = vae1_tlbmask(env);
4953     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4954     int bits = vae1_tlbbits(env, pageaddr);
4955 
4956     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4957 }
4958 
4959 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4960                                  uint64_t value)
4961 {
4962     /*
4963      * Invalidate by VA, EL1&0 (AArch64 version).
4964      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4965      * since we don't support flush-for-specific-ASID-only or
4966      * flush-last-level-only.
4967      */
4968     CPUState *cs = env_cpu(env);
4969     int mask = vae1_tlbmask(env);
4970     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4971     int bits = vae1_tlbbits(env, pageaddr);
4972 
4973     if (tlb_force_broadcast(env)) {
4974         tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4975     } else {
4976         tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4977     }
4978 }
4979 
4980 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4981                                    uint64_t value)
4982 {
4983     CPUState *cs = env_cpu(env);
4984     int mask = vae2_tlbmask(env);
4985     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4986     int bits = vae2_tlbbits(env, pageaddr);
4987 
4988     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4989 }
4990 
4991 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4992                                    uint64_t value)
4993 {
4994     CPUState *cs = env_cpu(env);
4995     uint64_t pageaddr = sextract64(value << 12, 0, 56);
4996     int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
4997 
4998     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4999                                                   ARMMMUIdxBit_E3, bits);
5000 }
5001 
5002 static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
5003 {
5004     /*
5005      * The MSB of value is the NS field, which only applies if SEL2
5006      * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
5007      */
5008     return (value >= 0
5009             && cpu_isar_feature(aa64_sel2, env_archcpu(env))
5010             && arm_is_secure_below_el3(env)
5011             ? ARMMMUIdxBit_Stage2_S
5012             : ARMMMUIdxBit_Stage2);
5013 }
5014 
5015 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5016                                     uint64_t value)
5017 {
5018     CPUState *cs = env_cpu(env);
5019     int mask = ipas2e1_tlbmask(env, value);
5020     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5021 
5022     if (tlb_force_broadcast(env)) {
5023         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5024     } else {
5025         tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
5026     }
5027 }
5028 
5029 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5030                                       uint64_t value)
5031 {
5032     CPUState *cs = env_cpu(env);
5033     int mask = ipas2e1_tlbmask(env, value);
5034     uint64_t pageaddr = sextract64(value << 12, 0, 56);
5035 
5036     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5037 }
5038 
5039 #ifdef TARGET_AARCH64
5040 typedef struct {
5041     uint64_t base;
5042     uint64_t length;
5043 } TLBIRange;
5044 
5045 static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
5046 {
5047     /*
5048      * Note that the TLBI range TG field encoding differs from both
5049      * TG0 and TG1 encodings.
5050      */
5051     switch (tg) {
5052     case 1:
5053         return Gran4K;
5054     case 2:
5055         return Gran16K;
5056     case 3:
5057         return Gran64K;
5058     default:
5059         return GranInvalid;
5060     }
5061 }
5062 
5063 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
5064                                      uint64_t value)
5065 {
5066     unsigned int page_size_granule, page_shift, num, scale, exponent;
5067     /* Extract one bit to represent the va selector in use. */
5068     uint64_t select = sextract64(value, 36, 1);
5069     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
5070     TLBIRange ret = { };
5071     ARMGranuleSize gran;
5072 
5073     page_size_granule = extract64(value, 46, 2);
5074     gran = tlbi_range_tg_to_gran_size(page_size_granule);
5075 
5076     /* The granule encoded in value must match the granule in use. */
5077     if (gran != param.gran) {
5078         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
5079                       page_size_granule);
5080         return ret;
5081     }
5082 
5083     page_shift = arm_granule_bits(gran);
5084     num = extract64(value, 39, 5);
5085     scale = extract64(value, 44, 2);
5086     exponent = (5 * scale) + 1;
5087 
5088     ret.length = (num + 1) << (exponent + page_shift);
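    /*
     * For example, with 4K pages (page_shift == 12), NUM == 3 and
     * SCALE == 1: exponent == 6, so length == 4 << 18 == 1MB.
     */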
5089 
5090     if (param.select) {
5091         ret.base = sextract64(value, 0, 37);
5092     } else {
5093         ret.base = extract64(value, 0, 37);
5094     }
5095     if (param.ds) {
5096         /*
5097          * With DS=1, BaseADDR is always shifted 16 so that it is able
5098          * to address all 52 va bits.  The input address is perforce
5099          * aligned on a 64k boundary regardless of translation granule.
5100          */
5101         page_shift = 16;
5102     }
5103     ret.base <<= page_shift;
5104 
5105     return ret;
5106 }
5107 
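/*
 * Perform a range TLBI for the mmu_idx set in idxmap.  All indexes in
 * the map must share one translation regime, since the range and TBI
 * parameters are decoded using only the first (lowest) index.
 */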
5108 static void do_rvae_write(CPUARMState *env, uint64_t value,
5109                           int idxmap, bool synced)
5110 {
5111     ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
5112     TLBIRange range;
5113     int bits;
5114 
5115     range = tlbi_aa64_get_range(env, one_idx, value);
5116     bits = tlbbits_for_regime(env, one_idx, range.base);
5117 
5118     if (synced) {
5119         tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
5120                                                   range.base,
5121                                                   range.length,
5122                                                   idxmap,
5123                                                   bits);
5124     } else {
5125         tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
5126                                   range.length, idxmap, bits);
5127     }
5128 }
5129 
5130 static void tlbi_aa64_rvae1_write(CPUARMState *env,
5131                                   const ARMCPRegInfo *ri,
5132                                   uint64_t value)
5133 {
5134     /*
5135      * Invalidate by VA range, EL1&0.
5136      * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
5137      * since we don't support flush-for-specific-ASID-only or
5138      * flush-last-level-only.
5139      */
5140 
5141     do_rvae_write(env, value, vae1_tlbmask(env),
5142                   tlb_force_broadcast(env));
5143 }
5144 
5145 static void tlbi_aa64_rvae1is_write(CPUARMState *env,
5146                                     const ARMCPRegInfo *ri,
5147                                     uint64_t value)
5148 {
5149     /*
5150      * Invalidate by VA range, Inner/Outer Shareable EL1&0.
5151      * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
5152      * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
5153      * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
5154      * shareable specific flushes.
5155      */
5156 
5157     do_rvae_write(env, value, vae1_tlbmask(env), true);
5158 }
5159 
5160 static void tlbi_aa64_rvae2_write(CPUARMState *env,
5161                                   const ARMCPRegInfo *ri,
5162                                   uint64_t value)
5163 {
5164     /*
5165      * Invalidate by VA range, EL2.
5166      * Currently handles all of RVAE2 and RVALE2,
5167      * since we don't support flush-for-specific-ASID-only or
5168      * flush-last-level-only.
5169      */
5170 
5171     do_rvae_write(env, value, vae2_tlbmask(env),
5172                   tlb_force_broadcast(env));
5175 }
5176 
5177 static void tlbi_aa64_rvae2is_write(CPUARMState *env,
5178                                     const ARMCPRegInfo *ri,
5179                                     uint64_t value)
5180 {
5181     /*
5182      * Invalidate by VA range, Inner/Outer Shareable, EL2.
5183      * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
5184      * since we don't support flush-for-specific-ASID-only,
5185      * flush-last-level-only or inner/outer shareable specific flushes.
5186      */
5187 
5188     do_rvae_write(env, value, vae2_tlbmask(env), true);
5190 }
5191 
5192 static void tlbi_aa64_rvae3_write(CPUARMState *env,
5193                                   const ARMCPRegInfo *ri,
5194                                   uint64_t value)
5195 {
5196     /*
5197      * Invalidate by VA range, EL3.
5198      * Currently handles all of RVAE3 and RVALE3,
5199      * since we don't support flush-for-specific-ASID-only or
5200      * flush-last-level-only.
5201      */
5202 
5203     do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
5204 }
5205 
5206 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
5207                                     const ARMCPRegInfo *ri,
5208                                     uint64_t value)
5209 {
5210     /*
5211      * Invalidate by VA range, EL3, Inner/Outer Shareable.
5212      * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
5213      * since we don't support flush-for-specific-ASID-only,
5214      * flush-last-level-only or inner/outer shareable specific flushes.
5215      */
5216 
5217     do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
5218 }
5219 
5220 static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5221                                      uint64_t value)
5222 {
5223     do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
5224                   tlb_force_broadcast(env));
5225 }
5226 
5227 static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
5228                                        const ARMCPRegInfo *ri,
5229                                        uint64_t value)
5230 {
5231     do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
5232 }
5233 #endif
5234 
5235 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
5236                                       bool isread)
5237 {
5238     int cur_el = arm_current_el(env);
5239 
5240     if (cur_el < 2) {
5241         uint64_t hcr = arm_hcr_el2_eff(env);
5242 
5243         if (cur_el == 0) {
5244             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5245                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
5246                     return CP_ACCESS_TRAP_EL2;
5247                 }
5248             } else {
5249                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
5250                     return CP_ACCESS_TRAP;
5251                 }
5252                 if (hcr & HCR_TDZ) {
5253                     return CP_ACCESS_TRAP_EL2;
5254                 }
5255             }
5256         } else if (hcr & HCR_TDZ) {
5257             return CP_ACCESS_TRAP_EL2;
5258         }
5259     }
5260     return CP_ACCESS_OK;
5261 }
5262 
5263 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
5264 {
5265     ARMCPU *cpu = env_archcpu(env);
5266     int dzp_bit = 1 << 4;
5267 
5268     /* DZP indicates whether DC ZVA access is allowed */
5269     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
5270         dzp_bit = 0;
5271     }
5272     return cpu->dcz_blocksize | dzp_bit;
5273 }
5274 
5275 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5276                                     bool isread)
5277 {
5278     if (!(env->pstate & PSTATE_SP)) {
5279         /*
5280          * Access to SP_EL0 is undefined if it's being used as
5281          * the stack pointer.
5282          */
5283         return CP_ACCESS_TRAP_UNCATEGORIZED;
5284     }
5285     return CP_ACCESS_OK;
5286 }
5287 
5288 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
5289 {
5290     return env->pstate & PSTATE_SP;
5291 }
5292 
5293 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5294 {
5295     update_spsel(env, val);
5296 }
5297 
5298 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5299                         uint64_t value)
5300 {
5301     ARMCPU *cpu = env_archcpu(env);
5302 
5303     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
5304         /* M bit is RAZ/WI for PMSA with no MPU implemented */
5305         value &= ~SCTLR_M;
5306     }
5307 
5308     /* ??? Lots of these bits are not implemented.  */
5309 
5310     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
5311         if (ri->opc1 == 6) { /* SCTLR_EL3 */
5312             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
5313         } else {
5314             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
5315                        SCTLR_ATA0 | SCTLR_ATA);
5316         }
5317     }
5318 
5319     if (raw_read(env, ri) == value) {
5320         /*
5321          * Skip the TLB flush if nothing actually changed; Linux likes
5322          * to do a lot of pointless SCTLR writes.
5323          */
5324         return;
5325     }
5326 
5327     raw_write(env, ri, value);
5328 
5329     /* This may enable/disable the MMU, so do a TLB flush.  */
5330     tlb_flush(CPU(cpu));
5331 
5332     if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
5333         /*
5334          * Normally we would always end the TB on an SCTLR write; see the
5335          * comment in ARMCPRegInfo sctlr initialization below for why Xscale
5336          * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
5337          * of hflags from the translator, so do it here.
5338          */
5339         arm_rebuild_hflags(env);
5340     }
5341 }
5342 
5343 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5344                            uint64_t value)
5345 {
5346     /*
5347      * Some MDCR_EL3 bits affect whether PMU counters are running:
5348      * if we are trying to change any of those then we must
5349      * bracket this update with PMU start/finish calls.
5350      */
5351     bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
5352 
5353     if (pmu_op) {
5354         pmu_op_start(env);
5355     }
5356     env->cp15.mdcr_el3 = value;
5357     if (pmu_op) {
5358         pmu_op_finish(env);
5359     }
5360 }
5361 
5362 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5363                        uint64_t value)
5364 {
5365     /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
5366     mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
5367 }
5368 
5369 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5370                            uint64_t value)
5371 {
5372     /*
5373      * Some MDCR_EL2 bits affect whether PMU counters are running:
5374      * if we are trying to change any of those then we must
5375      * bracket this update with PMU start/finish calls.
5376      */
5377     bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
5378 
5379     if (pmu_op) {
5380         pmu_op_start(env);
5381     }
5382     env->cp15.mdcr_el2 = value;
5383     if (pmu_op) {
5384         pmu_op_finish(env);
5385     }
5386 }
5387 
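/*
 * FEAT_NV: EL1 accesses to ELR_EL1/SPSR_EL1 trap to EL2 when
 * HCR_EL2.NV and NV1 are both set and NV2 is clear.
 */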
5388 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
5389                                  bool isread)
5390 {
5391     if (arm_current_el(env) == 1) {
5392         uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
5393 
5394         if (hcr_nv == (HCR_NV | HCR_NV1)) {
5395             return CP_ACCESS_TRAP_EL2;
5396         }
5397     }
5398     return CP_ACCESS_OK;
5399 }
5400 
5401 #ifdef CONFIG_USER_ONLY
5402 /*
5403  * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5404  * code to get around W^X restrictions, where one region is writable and the
5405  * other is executable.
5406  *
5407  * Since the executable region is never written to we cannot detect code
5408  * changes when running in user mode, and rely on the emulated JIT telling us
5409  * that the code has changed by executing this instruction.
5410  */
5411 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
5412                           uint64_t value)
5413 {
5414     uint64_t icache_line_mask, start_address, end_address;
5415     const ARMCPU *cpu;
5416 
5417     cpu = env_archcpu(env);
5418 
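    /*
     * CTR_EL0.IminLine is the log2 of the number of 4-byte words in
     * the smallest instruction cache line, so 4 << IminLine is that
     * line size in bytes.
     */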
5419     icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
5420     start_address = value & ~icache_line_mask;
5421     end_address = value | icache_line_mask;
5422 
5423     mmap_lock();
5424 
5425     tb_invalidate_phys_range(start_address, end_address);
5426 
5427     mmap_unlock();
5428 }
5429 #endif
5430 
5431 static const ARMCPRegInfo v8_cp_reginfo[] = {
5432     /*
5433      * Minimal set of EL0-visible registers. This will need to be expanded
5434      * significantly for system emulation of AArch64 CPUs.
5435      */
5436     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
5437       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
5438       .access = PL0_RW, .type = ARM_CP_NZCV },
5439     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
5440       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
5441       .type = ARM_CP_NO_RAW,
5442       .access = PL0_RW, .accessfn = aa64_daif_access,
5443       .fieldoffset = offsetof(CPUARMState, daif),
5444       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
5445     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
5446       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
5447       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5448       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
5449     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
5450       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
5451       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5452       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
5453     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
5454       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
5455       .access = PL0_R, .type = ARM_CP_NO_RAW,
5456       .fgt = FGT_DCZID_EL0,
5457       .readfn = aa64_dczid_read },
5458     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
5459       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
5460       .access = PL0_W, .type = ARM_CP_DC_ZVA,
5461 #ifndef CONFIG_USER_ONLY
5462       /* Avoid overhead of an access check that always passes in user-mode */
5463       .accessfn = aa64_zva_access,
5464       .fgt = FGT_DCZVA,
5465 #endif
5466     },
5467     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
5468       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
5469       .access = PL1_R, .type = ARM_CP_CURRENTEL },
5470     /*
5471      * Instruction cache ops. All of these except `IC IVAU` NOP because we
5472      * don't emulate caches.
5473      */
5474     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
5475       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5476       .access = PL1_W, .type = ARM_CP_NOP,
5477       .fgt = FGT_ICIALLUIS,
5478       .accessfn = access_ticab },
5479     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
5480       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5481       .access = PL1_W, .type = ARM_CP_NOP,
5482       .fgt = FGT_ICIALLU,
5483       .accessfn = access_tocu },
5484     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
5485       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
5486       .access = PL0_W,
5487       .fgt = FGT_ICIVAU,
5488       .accessfn = access_tocu,
5489 #ifdef CONFIG_USER_ONLY
5490       .type = ARM_CP_NO_RAW,
5491       .writefn = ic_ivau_write
5492 #else
5493       .type = ARM_CP_NOP
5494 #endif
5495     },
5496     /* Cache ops: all NOPs since we don't emulate caches */
5497     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
5498       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5499       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
5500       .fgt = FGT_DCIVAC,
5501       .type = ARM_CP_NOP },
5502     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
5503       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5504       .fgt = FGT_DCISW,
5505       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5506     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
5507       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
5508       .access = PL0_W, .type = ARM_CP_NOP,
5509       .fgt = FGT_DCCVAC,
5510       .accessfn = aa64_cacheop_poc_access },
5511     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
5512       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5513       .fgt = FGT_DCCSW,
5514       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5515     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
5516       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
5517       .access = PL0_W, .type = ARM_CP_NOP,
5518       .fgt = FGT_DCCVAU,
5519       .accessfn = access_tocu },
5520     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
5521       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
5522       .access = PL0_W, .type = ARM_CP_NOP,
5523       .fgt = FGT_DCCIVAC,
5524       .accessfn = aa64_cacheop_poc_access },
5525     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
5526       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5527       .fgt = FGT_DCCISW,
5528       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5529     /* TLBI operations */
5530     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
5531       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
5532       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5533       .fgt = FGT_TLBIVMALLE1IS,
5534       .writefn = tlbi_aa64_vmalle1is_write },
5535     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
5536       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
5537       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5538       .fgt = FGT_TLBIVAE1IS,
5539       .writefn = tlbi_aa64_vae1is_write },
5540     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
5541       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
5542       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5543       .fgt = FGT_TLBIASIDE1IS,
5544       .writefn = tlbi_aa64_vmalle1is_write },
5545     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
5546       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
5547       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5548       .fgt = FGT_TLBIVAAE1IS,
5549       .writefn = tlbi_aa64_vae1is_write },
5550     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
5551       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5552       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5553       .fgt = FGT_TLBIVALE1IS,
5554       .writefn = tlbi_aa64_vae1is_write },
5555     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
5556       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5557       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5558       .fgt = FGT_TLBIVAALE1IS,
5559       .writefn = tlbi_aa64_vae1is_write },
5560     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
5561       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
5562       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5563       .fgt = FGT_TLBIVMALLE1,
5564       .writefn = tlbi_aa64_vmalle1_write },
5565     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
5566       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
5567       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5568       .fgt = FGT_TLBIVAE1,
5569       .writefn = tlbi_aa64_vae1_write },
5570     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
5571       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
5572       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5573       .fgt = FGT_TLBIASIDE1,
5574       .writefn = tlbi_aa64_vmalle1_write },
5575     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
5576       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
5577       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5578       .fgt = FGT_TLBIVAAE1,
5579       .writefn = tlbi_aa64_vae1_write },
5580     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
5581       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5582       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5583       .fgt = FGT_TLBIVALE1,
5584       .writefn = tlbi_aa64_vae1_write },
5585     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
5586       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5587       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5588       .fgt = FGT_TLBIVAALE1,
5589       .writefn = tlbi_aa64_vae1_write },
5590     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
5591       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5592       .access = PL2_W, .type = ARM_CP_NO_RAW,
5593       .writefn = tlbi_aa64_ipas2e1is_write },
5594     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
5595       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5596       .access = PL2_W, .type = ARM_CP_NO_RAW,
5597       .writefn = tlbi_aa64_ipas2e1is_write },
5598     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
5599       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5600       .access = PL2_W, .type = ARM_CP_NO_RAW,
5601       .writefn = tlbi_aa64_alle1is_write },
5602     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
5603       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5604       .access = PL2_W, .type = ARM_CP_NO_RAW,
5605       .writefn = tlbi_aa64_alle1is_write },
5606     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
5607       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5608       .access = PL2_W, .type = ARM_CP_NO_RAW,
5609       .writefn = tlbi_aa64_ipas2e1_write },
5610     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
5611       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5612       .access = PL2_W, .type = ARM_CP_NO_RAW,
5613       .writefn = tlbi_aa64_ipas2e1_write },
5614     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
5615       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5616       .access = PL2_W, .type = ARM_CP_NO_RAW,
5617       .writefn = tlbi_aa64_alle1_write },
5618     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
5619       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5620       .access = PL2_W, .type = ARM_CP_NO_RAW,
5621       .writefn = tlbi_aa64_alle1is_write },
5622 #ifndef CONFIG_USER_ONLY
5623     /* 64 bit address translation operations */
5624     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5625       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
5626       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5627       .fgt = FGT_ATS1E1R,
5628       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5629     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5630       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
5631       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5632       .fgt = FGT_ATS1E1W,
5633       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5634     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5635       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
5636       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5637       .fgt = FGT_ATS1E0R,
5638       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5639     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5640       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
5641       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5642       .fgt = FGT_ATS1E0W,
5643       .accessfn = at_s1e01_access, .writefn = ats_write64 },
5644     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5645       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
5646       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5647       .accessfn = at_e012_access, .writefn = ats_write64 },
5648     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5649       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
5650       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5651       .accessfn = at_e012_access, .writefn = ats_write64 },
5652     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5653       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5654       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5655       .accessfn = at_e012_access, .writefn = ats_write64 },
5656     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5657       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
5658       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5659       .accessfn = at_e012_access, .writefn = ats_write64 },
5660     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
5661     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5662       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5663       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5664       .writefn = ats_write64 },
5665     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5666       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
5667       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5668       .writefn = ats_write64 },
5669     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5670       .type = ARM_CP_ALIAS,
5671       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
5672       .access = PL1_RW, .resetvalue = 0,
5673       .fgt = FGT_PAR_EL1,
5674       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5675       .writefn = par_write },
5676 #endif
5677     /* TLB invalidate last level of translation table walk */
5678     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5679       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5680       .writefn = tlbimva_is_write },
5681     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5682       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5683       .writefn = tlbimvaa_is_write },
5684     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5685       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5686       .writefn = tlbimva_write },
5687     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5688       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5689       .writefn = tlbimvaa_write },
5690     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5691       .type = ARM_CP_NO_RAW, .access = PL2_W,
5692       .writefn = tlbimva_hyp_write },
5693     { .name = "TLBIMVALHIS",
5694       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5695       .type = ARM_CP_NO_RAW, .access = PL2_W,
5696       .writefn = tlbimva_hyp_is_write },
5697     { .name = "TLBIIPAS2",
5698       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5699       .type = ARM_CP_NO_RAW, .access = PL2_W,
5700       .writefn = tlbiipas2_hyp_write },
5701     { .name = "TLBIIPAS2IS",
5702       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5703       .type = ARM_CP_NO_RAW, .access = PL2_W,
5704       .writefn = tlbiipas2is_hyp_write },
5705     { .name = "TLBIIPAS2L",
5706       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5707       .type = ARM_CP_NO_RAW, .access = PL2_W,
5708       .writefn = tlbiipas2_hyp_write },
5709     { .name = "TLBIIPAS2LIS",
5710       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5711       .type = ARM_CP_NO_RAW, .access = PL2_W,
5712       .writefn = tlbiipas2is_hyp_write },
5713     /* 32 bit cache operations */
5714     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5715       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
5716     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5717       .type = ARM_CP_NOP, .access = PL1_W },
5718     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5719       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5720     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5721       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5722     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5723       .type = ARM_CP_NOP, .access = PL1_W },
5724     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5725       .type = ARM_CP_NOP, .access = PL1_W },
5726     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5727       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5728     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5729       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5730     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5731       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5732     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5733       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5734     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5735       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5736     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5737       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5738     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5739       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5740     /* MMU Domain access control / MPU write buffer control */
5741     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5742       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5743       .writefn = dacr_write, .raw_writefn = raw_write,
5744       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5745                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5746     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5747       .type = ARM_CP_ALIAS,
5748       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5749       .access = PL1_RW, .accessfn = access_nv1,
5750       .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
5751       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5752     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5753       .type = ARM_CP_ALIAS,
5754       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5755       .access = PL1_RW, .accessfn = access_nv1,
5756       .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
5757       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5758     /*
5759      * We rely on the access checks not allowing the guest to write to the
5760      * state field when SPSel indicates that it's being used as the stack
5761      * pointer.
5762      */
5763     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5764       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5765       .access = PL1_RW, .accessfn = sp_el0_access,
5766       .type = ARM_CP_ALIAS,
5767       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5768     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5769       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5770       .nv2_redirect_offset = 0x240,
5771       .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
5772       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5773     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5774       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5775       .type = ARM_CP_NO_RAW,
5776       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5777     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5778       .type = ARM_CP_ALIAS,
5779       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5780       .access = PL2_RW,
5781       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5782     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5783       .type = ARM_CP_ALIAS,
5784       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5785       .access = PL2_RW,
5786       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5787     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5788       .type = ARM_CP_ALIAS,
5789       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5790       .access = PL2_RW,
5791       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5792     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5793       .type = ARM_CP_ALIAS,
5794       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5795       .access = PL2_RW,
5796       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5797     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5798       .type = ARM_CP_IO,
5799       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5800       .resetvalue = 0,
5801       .access = PL3_RW,
5802       .writefn = mdcr_el3_write,
5803       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5804     { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5805       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5806       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5807       .writefn = sdcr_write,
5808       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5809 };
5810 
5811 /* These are present only when EL1 supports AArch32 */
5812 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
5813     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5814       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5815       .access = PL2_RW,
5816       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5817       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5818     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5819       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5820       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5821       .writefn = dacr_write, .raw_writefn = raw_write,
5822       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5823     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5824       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5825       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5826       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5827 };
5828 
5829 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5830 {
5831     ARMCPU *cpu = env_archcpu(env);
5832 
5833     if (arm_feature(env, ARM_FEATURE_V8)) {
5834         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5835     } else {
5836         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5837     }
5838 
5839     if (arm_feature(env, ARM_FEATURE_EL3)) {
5840         valid_mask &= ~HCR_HCD;
5841     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5842         /*
5843          * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5844          * However, if we're using the SMC PSCI conduit then QEMU is
5845          * effectively acting like EL3 firmware and so the guest at
5846          * EL2 should retain the ability to prevent EL1 from being
5847          * able to make SMC calls into the ersatz firmware, so in
5848          * that case HCR.TSC should be read/write.
5849          */
5850         valid_mask &= ~HCR_TSC;
5851     }
5852 
5853     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5854         if (cpu_isar_feature(aa64_vh, cpu)) {
5855             valid_mask |= HCR_E2H;
5856         }
5857         if (cpu_isar_feature(aa64_ras, cpu)) {
5858             valid_mask |= HCR_TERR | HCR_TEA;
5859         }
5860         if (cpu_isar_feature(aa64_lor, cpu)) {
5861             valid_mask |= HCR_TLOR;
5862         }
5863         if (cpu_isar_feature(aa64_pauth, cpu)) {
5864             valid_mask |= HCR_API | HCR_APK;
5865         }
5866         if (cpu_isar_feature(aa64_mte, cpu)) {
5867             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5868         }
5869         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
5870             valid_mask |= HCR_ENSCXT;
5871         }
5872         if (cpu_isar_feature(aa64_fwb, cpu)) {
5873             valid_mask |= HCR_FWB;
5874         }
5875         if (cpu_isar_feature(aa64_rme, cpu)) {
5876             valid_mask |= HCR_GPF;
5877         }
5878         if (cpu_isar_feature(aa64_nv, cpu)) {
5879             valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
5880         }
5881         if (cpu_isar_feature(aa64_nv2, cpu)) {
5882             valid_mask |= HCR_NV2;
5883         }
5884     }
5885 
5886     if (cpu_isar_feature(any_evt, cpu)) {
5887         valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
5888     } else if (cpu_isar_feature(any_half_evt, cpu)) {
5889         valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
5890     }
5891 
5892     /* Clear RES0 bits.  */
5893     value &= valid_mask;
5894 
5895     /*
5896      * These bits change the MMU setup:
5897      * HCR_VM enables stage 2 translation
5898      * HCR_PTW forbids certain page-table setups
5899      * HCR_DC disables stage1 and enables stage2 translation
5900      * HCR_DCT enables tagging on (disabled) stage1 translation
5901      * HCR_FWB changes the interpretation of stage2 descriptor bits
5902      * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
5903      */
5904     if ((env->cp15.hcr_el2 ^ value) &
5905         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
5906         tlb_flush(CPU(cpu));
5907     }
5908     env->cp15.hcr_el2 = value;
5909 
5910     /*
5911      * Updates to VI and VF require us to update the status of
5912      * virtual interrupts, which are the logical OR of these bits
5913      * and the state of the input lines from the GIC. (This requires
5914      * that we have the BQL, which is done by marking the
5915      * reginfo structs as ARM_CP_IO.)
5916      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5917      * possible for it to be taken immediately, because VIRQ and
5918      * VFIQ are masked unless running at EL0 or EL1, and HCR
5919      * can only be written at EL2.
5920      */
5921     g_assert(bql_locked());
5922     arm_cpu_update_virq(cpu);
5923     arm_cpu_update_vfiq(cpu);
5924     arm_cpu_update_vserr(cpu);
5925 }
5926 
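/*
 * Illustrative sketch, not part of the build: do_hcr_write() above uses
 * the pattern "mask the written value down to the bits this CPU
 * implements, then flush cached translations only when a bit that
 * affects translation actually changed".  The helper below shows that
 * XOR-based change detection in isolation; all names and bit positions
 * are made up for the example.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define DEMO_VM (1ULL << 0)   /* pretend: enables stage 2 translation */
#define DEMO_DC (1ULL << 12)  /* pretend: changes stage 1 behaviour */

static bool demo_masked_write(uint64_t *reg, uint64_t value,
                              uint64_t valid_mask)
{
    uint64_t mmu_bits = DEMO_VM | DEMO_DC;
    bool need_flush;

    value &= valid_mask;                      /* clear RES0 bits */
    need_flush = (*reg ^ value) & mmu_bits;   /* did an MMU bit change? */
    *reg = value;
    return need_flush;                        /* caller would tlb_flush() */
}
#endif
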
5927 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5928 {
5929     do_hcr_write(env, value, 0);
5930 }
5931 
5932 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5933                           uint64_t value)
5934 {
5935     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5936     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5937     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5938 }
5939 
5940 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5941                          uint64_t value)
5942 {
5943     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5944     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5945     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5946 }
5947 
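/*
 * Illustrative sketch, not part of the build: hcr_writehigh() and
 * hcr_writelow() rely on deposit64(value, start, length, fieldval) from
 * qemu/bitops.h, which returns @value with the @length-bit field at bit
 * @start replaced by @fieldval.  A minimal stand-alone equivalent:
 */
#if 0
#include <stdint.h>

static uint64_t demo_deposit64(uint64_t value, int start, int length,
                               uint64_t fieldval)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;

    return (value & ~mask) | ((fieldval << start) & mask);
}

/*
 * e.g. an HCR2 write replaces bits [63:32] and keeps bits [31:0]:
 *     new_hcr = demo_deposit64(old_hcr, 32, 32, hcr2_value);
 */
#endif
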
5948 /*
5949  * Return the effective value of HCR_EL2, at the given security state.
5950  * Bits that are not included here:
5951  * RW       (read from SCR_EL3.RW as needed)
5952  */
5953 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
5954 {
5955     uint64_t ret = env->cp15.hcr_el2;
5956 
5957     assert(space != ARMSS_Root);
5958 
5959     if (!arm_is_el2_enabled_secstate(env, space)) {
5960         /*
5961          * "This register has no effect if EL2 is not enabled in the
5962          * current Security state".  This is ARMv8.4-SecEL2 speak for
5963          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5964          *
5965          * Prior to that, the language was "In an implementation that
5966          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5967          * as if this field is 0 for all purposes other than a direct
5968          * read or write access of HCR_EL2".  With lots of enumeration
5969          * on a per-field basis.  In current QEMU, this is condition
5970          * on a per-field basis.  In current QEMU, this condition
5971          *
5972          * Since the v8.4 language applies to the entire register, and
5973          * appears to be backward compatible, use that.
5974          */
5975         return 0;
5976     }
5977 
5978     /*
5979      * For a cpu that supports both aarch64 and aarch32, we can set bits
5980      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5981      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5982      */
5983     if (!arm_el_is_aa64(env, 2)) {
5984         uint64_t aa32_valid;
5985 
5986         /*
5987          * These bits are up-to-date as of ARMv8.6.
5988          * For HCR, it's easiest to list just the 2 bits that are invalid.
5989          * For HCR2, list those that are valid.
5990          */
5991         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5992         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5993                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5994         ret &= aa32_valid;
5995     }
5996 
5997     if (ret & HCR_TGE) {
5998         /* These bits are up-to-date as of ARMv8.6.  */
5999         if (ret & HCR_E2H) {
6000             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
6001                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
6002                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
6003                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
6004                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
6005                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
6006         } else {
6007             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
6008         }
6009         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
6010                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
6011                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
6012                  HCR_TLOR);
6013     }
6014 
6015     return ret;
6016 }
6017 
6018 uint64_t arm_hcr_el2_eff(CPUARMState *env)
6019 {
6020     if (arm_feature(env, ARM_FEATURE_M)) {
6021         return 0;
6022     }
6023     return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
6024 }
6025 
6026 /*
6027  * Corresponds to ARM pseudocode function ELIsInHost().
6028  */
6029 bool el_is_in_host(CPUARMState *env, int el)
6030 {
6031     uint64_t mask;
6032 
6033     /*
6034      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
6035      * Perform the simplest bit tests first, and validate EL2 afterward.
6036      */
6037     if (el & 1) {
6038         return false; /* EL1 or EL3 */
6039     }
6040 
6041     /*
6042      * Note that hcr_write() checks isar_feature_aa64_vh(),
6043      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
6044      */
6045     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
6046     if ((env->cp15.hcr_el2 & mask) != mask) {
6047         return false;
6048     }
6049 
6050     /* TGE and/or E2H set: double check those bits are currently legal. */
6051     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
6052 }
6053 
6054 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
6055                        uint64_t value)
6056 {
6057     uint64_t valid_mask = 0;
6058 
6059     /* FEAT_MOPS adds MSCEn and MCE2 */
6060     if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
6061         valid_mask |= HCRX_MSCEN | HCRX_MCE2;
6062     }
6063 
6064     /* Clear RES0 bits.  */
6065     env->cp15.hcrx_el2 = value & valid_mask;
6066 }
6067 
6068 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
6069                                   bool isread)
6070 {
6071     if (arm_current_el(env) == 2
6072         && arm_feature(env, ARM_FEATURE_EL3)
6073         && !(env->cp15.scr_el3 & SCR_HXEN)) {
6074         return CP_ACCESS_TRAP_EL3;
6075     }
6076     return CP_ACCESS_OK;
6077 }
6078 
6079 static const ARMCPRegInfo hcrx_el2_reginfo = {
6080     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
6081     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
6082     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
6083     .nv2_redirect_offset = 0xa0,
6084     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
6085 };
6086 
6087 /* Return the effective value of HCRX_EL2.  */
6088 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
6089 {
6090     /*
6091      * The bits in this register behave as 0 for all purposes other than
6092      * direct reads of the register if SCR_EL3.HXEn is 0.
6093      * If EL2 is not enabled in the current security state, then each
6094      * bit may behave as if 0, or as if 1, depending on the bit.
6095      * For the moment, we treat the EL2-disabled case as taking
6096      * priority over the HXEn-disabled case. This is true for the only
6097      * bit for a feature which we implement where the answer is different
6098      * for the two cases (MSCEn for FEAT_MOPS).
6099      * This may need to be revisited for future bits.
6100      */
6101     if (!arm_is_el2_enabled(env)) {
6102         uint64_t hcrx = 0;
6103         if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
6104             /* MSCEn behaves as 1 if EL2 is not enabled */
6105             hcrx |= HCRX_MSCEN;
6106         }
6107         return hcrx;
6108     }
6109     if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
6110         return 0;
6111     }
6112     return env->cp15.hcrx_el2;
6113 }
6114 
6115 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6116                            uint64_t value)
6117 {
6118     /*
6119      * For A-profile AArch32 EL3, if NSACR.CP10
6120      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6121      */
6122     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6123         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6124         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6125         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
6126     }
6127     env->cp15.cptr_el[2] = value;
6128 }
6129 
6130 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
6131 {
6132     /*
6133      * For A-profile AArch32 EL3, if NSACR.CP10
6134      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6135      */
6136     uint64_t value = env->cp15.cptr_el[2];
6137 
6138     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6139         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6140         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6141     }
6142     return value;
6143 }
6144 
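/*
 * Illustrative sketch, not part of the build: cptr_el2_write() and
 * cptr_el2_read() above implement "write-ignore, read-as-one" for a
 * pair of bits that is forced on by an external condition.  The generic
 * shape, for a hypothetical condition flag and bit mask:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint64_t demo_wi_store(uint64_t old, uint64_t value,
                              uint64_t mask, bool forced)
{
    if (forced) {
        /* ignore the write to the forced bits, keep the old values */
        value = (value & ~mask) | (old & mask);
    }
    return value;
}

static uint64_t demo_rao_load(uint64_t stored, uint64_t mask, bool forced)
{
    return forced ? (stored | mask) : stored;   /* forced bits read as 1 */
}
#endif
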
6145 static const ARMCPRegInfo el2_cp_reginfo[] = {
6146     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
6147       .type = ARM_CP_IO,
6148       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6149       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6150       .nv2_redirect_offset = 0x78,
6151       .writefn = hcr_write, .raw_writefn = raw_write },
6152     { .name = "HCR", .state = ARM_CP_STATE_AA32,
6153       .type = ARM_CP_ALIAS | ARM_CP_IO,
6154       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6155       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6156       .writefn = hcr_writelow },
6157     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
6158       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
6159       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6160     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
6161       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6162       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
6163       .access = PL2_RW,
6164       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
6165     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
6166       .type = ARM_CP_NV2_REDIRECT,
6167       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
6168       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
6169     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
6170       .type = ARM_CP_NV2_REDIRECT,
6171       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
6172       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
6173     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
6174       .type = ARM_CP_ALIAS,
6175       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
6176       .access = PL2_RW,
6177       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
6178     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
6179       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6180       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
6181       .access = PL2_RW,
6182       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
6183     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
6184       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
6185       .access = PL2_RW, .writefn = vbar_write,
6186       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
6187       .resetvalue = 0 },
6188     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
6189       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
6190       .access = PL3_RW, .type = ARM_CP_ALIAS,
6191       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
6192     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
6193       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
6194       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
6195       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
6196       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
6197     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
6198       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
6199       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
6200       .resetvalue = 0 },
6201     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
6202       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
6203       .access = PL2_RW, .type = ARM_CP_ALIAS,
6204       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
6205     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
6206       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
6207       .access = PL2_RW, .type = ARM_CP_CONST,
6208       .resetvalue = 0 },
6209     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
6210     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
6211       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6212       .access = PL2_RW, .type = ARM_CP_CONST,
6213       .resetvalue = 0 },
6214     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
6215       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
6216       .access = PL2_RW, .type = ARM_CP_CONST,
6217       .resetvalue = 0 },
6218     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
6219       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
6220       .access = PL2_RW, .type = ARM_CP_CONST,
6221       .resetvalue = 0 },
6222     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
6223       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
6224       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
6225       .raw_writefn = raw_write,
6226       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
6227     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
6228       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6229       .type = ARM_CP_ALIAS,
6230       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6231       .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
6232     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
6233       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6234       .access = PL2_RW,
6235       .nv2_redirect_offset = 0x40,
6236       /* no .writefn needed as this can't cause an ASID change */
6237       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
6238     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
6239       .cp = 15, .opc1 = 6, .crm = 2,
6240       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6241       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6242       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
6243       .writefn = vttbr_write, .raw_writefn = raw_write },
6244     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
6245       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
6246       .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
6247       .nv2_redirect_offset = 0x20,
6248       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
6249     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
6250       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
6251       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
6252       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
6253     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6254       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
6255       .access = PL2_RW, .resetvalue = 0,
6256       .nv2_redirect_offset = 0x90,
6257       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
6258     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
6259       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
6260       .access = PL2_RW, .resetvalue = 0,
6261       .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
6262       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6263     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
6264       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6265       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6266     { .name = "TLBIALLNSNH",
6267       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
6268       .type = ARM_CP_NO_RAW, .access = PL2_W,
6269       .writefn = tlbiall_nsnh_write },
6270     { .name = "TLBIALLNSNHIS",
6271       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
6272       .type = ARM_CP_NO_RAW, .access = PL2_W,
6273       .writefn = tlbiall_nsnh_is_write },
6274     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6275       .type = ARM_CP_NO_RAW, .access = PL2_W,
6276       .writefn = tlbiall_hyp_write },
6277     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6278       .type = ARM_CP_NO_RAW, .access = PL2_W,
6279       .writefn = tlbiall_hyp_is_write },
6280     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6281       .type = ARM_CP_NO_RAW, .access = PL2_W,
6282       .writefn = tlbimva_hyp_write },
6283     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6284       .type = ARM_CP_NO_RAW, .access = PL2_W,
6285       .writefn = tlbimva_hyp_is_write },
6286     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
6287       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6288       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6289       .writefn = tlbi_aa64_alle2_write },
6290     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
6291       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6292       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6293       .writefn = tlbi_aa64_vae2_write },
6294     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
6295       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
6296       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6297       .writefn = tlbi_aa64_vae2_write },
6298     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
6299       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6300       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6301       .writefn = tlbi_aa64_alle2is_write },
6302     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
6303       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6304       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6305       .writefn = tlbi_aa64_vae2is_write },
6306     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
6307       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
6308       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6309       .writefn = tlbi_aa64_vae2is_write },
6310 #ifndef CONFIG_USER_ONLY
6311     /*
6312      * Unlike the other EL2-related AT operations, these must
6313      * UNDEF from EL3 if EL2 is not implemented, which is why we
6314      * define them here rather than with the rest of the AT ops.
6315      */
6316     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
6317       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6318       .access = PL2_W, .accessfn = at_s1e2_access,
6319       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6320       .writefn = ats_write64 },
6321     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
6322       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6323       .access = PL2_W, .accessfn = at_s1e2_access,
6324       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6325       .writefn = ats_write64 },
6326     /*
6327      * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
6328      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
6329      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
6330      * to behave as if SCR.NS was 1.
6331      */
6332     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6333       .access = PL2_W,
6334       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6335     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6336       .access = PL2_W,
6337       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6338     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
6339       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
6340       /*
6341        * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
6342        * reset values as IMPDEF. We choose to reset to 3 to comply with
6343        * both ARMv7 and ARMv8.
6344        */
6345       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
6346       .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
6347       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
6348     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
6349       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
6350       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
6351       .writefn = gt_cntvoff_write,
6352       .nv2_redirect_offset = 0x60,
6353       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6354     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
6355       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
6356       .writefn = gt_cntvoff_write,
6357       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6358     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
6359       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
6360       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6361       .type = ARM_CP_IO, .access = PL2_RW,
6362       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6363     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
6364       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6365       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
6366       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6367     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
6368       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
6369       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
6370       .resetfn = gt_hyp_timer_reset,
6371       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
6372     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
6373       .type = ARM_CP_IO,
6374       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
6375       .access = PL2_RW,
6376       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
6377       .resetvalue = 0,
6378       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
6379 #endif
6380     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
6381       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6382       .access = PL2_RW, .accessfn = access_el3_aa32ns,
6383       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6384     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
6385       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6386       .access = PL2_RW,
6387       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6388     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
6389       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
6390       .access = PL2_RW,
6391       .nv2_redirect_offset = 0x80,
6392       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
6393 };
6394 
6395 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
6396     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
6397       .type = ARM_CP_ALIAS | ARM_CP_IO,
6398       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
6399       .access = PL2_RW,
6400       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
6401       .writefn = hcr_writehigh },
6402 };
6403 
6404 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6405                                   bool isread)
6406 {
6407     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
6408         return CP_ACCESS_OK;
6409     }
6410     return CP_ACCESS_TRAP_UNCATEGORIZED;
6411 }
6412 
6413 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
6414     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
6415       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
6416       .access = PL2_RW, .accessfn = sel2_access,
6417       .nv2_redirect_offset = 0x30,
6418       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
6419     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
6420       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
6421       .access = PL2_RW, .accessfn = sel2_access,
6422       .nv2_redirect_offset = 0x48,
6423       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
6424 };
6425 
6426 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
6427                                    bool isread)
6428 {
6429     /*
6430      * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
6431      * At Secure EL1 it traps to EL3 or EL2.
6432      */
6433     if (arm_current_el(env) == 3) {
6434         return CP_ACCESS_OK;
6435     }
6436     if (arm_is_secure_below_el3(env)) {
6437         if (env->cp15.scr_el3 & SCR_EEL2) {
6438             return CP_ACCESS_TRAP_EL2;
6439         }
6440         return CP_ACCESS_TRAP_EL3;
6441     }
6442     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
6443     if (isread) {
6444         return CP_ACCESS_OK;
6445     }
6446     return CP_ACCESS_TRAP_UNCATEGORIZED;
6447 }
6448 
6449 static const ARMCPRegInfo el3_cp_reginfo[] = {
6450     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
6451       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
6452       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
6453       .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
6454     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
6455       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
6456       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6457       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
6458       .writefn = scr_write, .raw_writefn = raw_write },
6459     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
6460       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
6461       .access = PL3_RW, .resetvalue = 0,
6462       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
6463     { .name = "SDER",
6464       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
6465       .access = PL3_RW, .resetvalue = 0,
6466       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
6467     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6468       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6469       .writefn = vbar_write, .resetvalue = 0,
6470       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
6471     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
6472       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
6473       .access = PL3_RW, .resetvalue = 0,
6474       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
6475     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
6476       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
6477       .access = PL3_RW,
6478       /* no .writefn needed as this can't cause an ASID change */
6479       .resetvalue = 0,
6480       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
6481     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
6482       .type = ARM_CP_ALIAS,
6483       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
6484       .access = PL3_RW,
6485       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
6486     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
6487       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
6488       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
6489     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
6490       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
6491       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
6492     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
6493       .type = ARM_CP_ALIAS,
6494       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
6495       .access = PL3_RW,
6496       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
6497     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
6498       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
6499       .access = PL3_RW, .writefn = vbar_write,
6500       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
6501       .resetvalue = 0 },
6502     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
6503       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
6504       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
6505       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
6506     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
6507       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
6508       .access = PL3_RW, .resetvalue = 0,
6509       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
6510     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
6511       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
6512       .access = PL3_RW, .type = ARM_CP_CONST,
6513       .resetvalue = 0 },
6514     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
6515       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
6516       .access = PL3_RW, .type = ARM_CP_CONST,
6517       .resetvalue = 0 },
6518     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
6519       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
6520       .access = PL3_RW, .type = ARM_CP_CONST,
6521       .resetvalue = 0 },
6522     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
6523       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
6524       .access = PL3_W, .type = ARM_CP_NO_RAW,
6525       .writefn = tlbi_aa64_alle3is_write },
6526     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
6527       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
6528       .access = PL3_W, .type = ARM_CP_NO_RAW,
6529       .writefn = tlbi_aa64_vae3is_write },
6530     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
6531       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
6532       .access = PL3_W, .type = ARM_CP_NO_RAW,
6533       .writefn = tlbi_aa64_vae3is_write },
6534     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
6535       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
6536       .access = PL3_W, .type = ARM_CP_NO_RAW,
6537       .writefn = tlbi_aa64_alle3_write },
6538     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
6539       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
6540       .access = PL3_W, .type = ARM_CP_NO_RAW,
6541       .writefn = tlbi_aa64_vae3_write },
6542     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
6543       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
6544       .access = PL3_W, .type = ARM_CP_NO_RAW,
6545       .writefn = tlbi_aa64_vae3_write },
6546 };
6547 
6548 #ifndef CONFIG_USER_ONLY
6549 /* Test if system register redirection is to occur in the current state.  */
6550 static bool redirect_for_e2h(CPUARMState *env)
6551 {
6552     return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
6553 }
6554 
6555 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
6556 {
6557     CPReadFn *readfn;
6558 
6559     if (redirect_for_e2h(env)) {
6560         /* Switch to the saved EL2 version of the register.  */
6561         ri = ri->opaque;
6562         readfn = ri->readfn;
6563     } else {
6564         readfn = ri->orig_readfn;
6565     }
6566     if (readfn == NULL) {
6567         readfn = raw_read;
6568     }
6569     return readfn(env, ri);
6570 }
6571 
6572 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
6573                           uint64_t value)
6574 {
6575     CPWriteFn *writefn;
6576 
6577     if (redirect_for_e2h(env)) {
6578         /* Switch to the saved EL2 version of the register.  */
6579         ri = ri->opaque;
6580         writefn = ri->writefn;
6581     } else {
6582         writefn = ri->orig_writefn;
6583     }
6584     if (writefn == NULL) {
6585         writefn = raw_write;
6586     }
6587     writefn(env, ri, value);
6588 }
6589 
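/*
 * Illustrative sketch, not part of the build: el2_e2h_read() and
 * el2_e2h_write() above redirect an access by stashing a pointer to the
 * EL2 register's reginfo in the EL1 reginfo's ->opaque field and saving
 * the original accessor in ->orig_readfn.  The hypothetical types below
 * show that dispatch shape in isolation.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

typedef struct DemoReg DemoReg;
typedef uint64_t (*DemoReadFn)(const DemoReg *ri);

struct DemoReg {
    DemoReadFn readfn;        /* accessor of this register */
    DemoReadFn orig_readfn;   /* saved pre-redirection accessor */
    const DemoReg *opaque;    /* register we may redirect to */
};

static uint64_t demo_redirected_read(const DemoReg *ri, bool redirect)
{
    if (redirect) {
        ri = ri->opaque;            /* switch to the saved EL2 version */
        return ri->readfn(ri);
    }
    return ri->orig_readfn(ri);     /* fall back to the original accessor */
}
#endif
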
6590 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
6591 {
6592     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6593     return ri->orig_readfn(env, ri->opaque);
6594 }
6595 
6596 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
6597                               uint64_t value)
6598 {
6599     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6600     return ri->orig_writefn(env, ri->opaque, value);
6601 }
6602 
6603 static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
6604                                          const ARMCPRegInfo *ri,
6605                                          bool isread)
6606 {
6607     if (arm_current_el(env) == 1) {
6608         /*
6609          * This must be a FEAT_NV access (will either trap or redirect
6610          * to memory). None of the registers with _EL12 aliases want to
6611          * apply their trap controls for this kind of access, so don't
6612          * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
6613          */
6614         return CP_ACCESS_OK;
6615     }
6616     /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
6617     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
6618         return CP_ACCESS_TRAP_UNCATEGORIZED;
6619     }
6620     if (ri->orig_accessfn) {
6621         return ri->orig_accessfn(env, ri->opaque, isread);
6622     }
6623     return CP_ACCESS_OK;
6624 }
6625 
6626 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
6627 {
6628     struct E2HAlias {
6629         uint32_t src_key, dst_key, new_key;
6630         const char *src_name, *dst_name, *new_name;
6631         bool (*feature)(const ARMISARegisters *id);
6632     };
6633 
6634 #define K(op0, op1, crn, crm, op2) \
6635     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
6636 
6637     static const struct E2HAlias aliases[] = {
6638         { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
6639           "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
6640         { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
6641           "CPACR", "CPTR_EL2", "CPACR_EL12" },
6642         { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
6643           "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
6644         { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
6645           "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
6646         { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
6647           "TCR_EL1", "TCR_EL2", "TCR_EL12" },
6648         { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
6649           "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
6650         { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
6651           "ELR_EL1", "ELR_EL2", "ELR_EL12" },
6652         { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
6653           "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
6654         { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
6655           "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
6656         { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
6657           "ESR_EL1", "ESR_EL2", "ESR_EL12" },
6658         { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
6659           "FAR_EL1", "FAR_EL2", "FAR_EL12" },
6660         { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
6661           "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
6662         { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
6663           "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
6664         { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
6665           "VBAR", "VBAR_EL2", "VBAR_EL12" },
6666         { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6667           "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6668         { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6669           "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6670 
6671         /*
6672          * Note that redirection of ZCR is mentioned in the description
6673          * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6674          * not in the summary table.
6675          */
6676         { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
6677           "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
6678         { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
6679           "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
6680 
6681         { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
6682           "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
6683 
6684         { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
6685           "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
6686           isar_feature_aa64_scxtnum },
6687 
6688         /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6689         /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6690     };
6691 #undef K
6692 
6693     size_t i;
6694 
6695     for (i = 0; i < ARRAY_SIZE(aliases); i++) {
6696         const struct E2HAlias *a = &aliases[i];
6697         ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
6698         bool ok;
6699 
6700         if (a->feature && !a->feature(&cpu->isar)) {
6701             continue;
6702         }
6703 
6704         src_reg = g_hash_table_lookup(cpu->cp_regs,
6705                                       (gpointer)(uintptr_t)a->src_key);
6706         dst_reg = g_hash_table_lookup(cpu->cp_regs,
6707                                       (gpointer)(uintptr_t)a->dst_key);
6708         g_assert(src_reg != NULL);
6709         g_assert(dst_reg != NULL);
6710 
6711         /* Cross-compare names to detect typos in the keys.  */
6712         g_assert(strcmp(src_reg->name, a->src_name) == 0);
6713         g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6714 
6715         /* None of the core system registers use opaque; we will.  */
6716         g_assert(src_reg->opaque == NULL);
6717 
6718         /* Create alias before redirection so we dup the right data. */
6719         new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6720 
6721         new_reg->name = a->new_name;
6722         new_reg->type |= ARM_CP_ALIAS;
6723         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
6724         new_reg->access &= PL2_RW | PL3_RW;
6725         /* The new_reg op fields are as per new_key, not the target reg */
6726         new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
6727             >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
6728         new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
6729             >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
6730         new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
6731             >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
6732         new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
6733             >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
6734         new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
6735             >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
6736         new_reg->opaque = src_reg;
6737         new_reg->orig_readfn = src_reg->readfn ?: raw_read;
6738         new_reg->orig_writefn = src_reg->writefn ?: raw_write;
6739         new_reg->orig_accessfn = src_reg->accessfn;
6740         if (!new_reg->raw_readfn) {
6741             new_reg->raw_readfn = raw_read;
6742         }
6743         if (!new_reg->raw_writefn) {
6744             new_reg->raw_writefn = raw_write;
6745         }
6746         new_reg->readfn = el2_e2h_e12_read;
6747         new_reg->writefn = el2_e2h_e12_write;
6748         new_reg->accessfn = el2_e2h_e12_access;
6749 
6750         /*
6751          * If the _EL1 register is redirected to memory by FEAT_NV2,
6752          * then it shares the offset with the _EL12 register,
6753          * and which one is redirected depends on HCR_EL2.NV1.
6754          */
6755         if (new_reg->nv2_redirect_offset) {
6756             assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
6757             new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
6758             new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
6759         }
6760 
6761         ok = g_hash_table_insert(cpu->cp_regs,
6762                                  (gpointer)(uintptr_t)a->new_key, new_reg);
6763         g_assert(ok);
6764 
6765         src_reg->opaque = dst_reg;
6766         src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6767         src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6768         if (!src_reg->raw_readfn) {
6769             src_reg->raw_readfn = raw_read;
6770         }
6771         if (!src_reg->raw_writefn) {
6772             src_reg->raw_writefn = raw_write;
6773         }
6774         src_reg->readfn = el2_e2h_read;
6775         src_reg->writefn = el2_e2h_write;
6776     }
6777 }
6778 #endif
6779 
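/*
 * Illustrative sketch, not part of the build: the K() macro above packs
 * an (op0, op1, crn, crm, op2) encoding into one hash key, and the
 * alias loop unpacks individual fields with mask-and-shift.  A
 * stand-alone round trip, using made-up field positions:
 */
#if 0
#include <assert.h>
#include <stdint.h>

/*
 * pretend layout: op0 in [15:14], op1 in [13:11], crn in [10:7],
 * crm in [6:3], op2 in [2:0]
 */
static uint32_t demo_key(uint32_t op0, uint32_t op1, uint32_t crn,
                         uint32_t crm, uint32_t op2)
{
    return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;
}

static uint32_t demo_crn(uint32_t key)
{
    return (key >> 7) & 0xf;        /* mask-and-shift, as for CRN above */
}

int main(void)
{
    uint32_t key = demo_key(3, 5, 1, 0, 2);

    assert(demo_crn(key) == 1);
    return 0;
}
#endif
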
6780 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6781                                      bool isread)
6782 {
6783     int cur_el = arm_current_el(env);
6784 
6785     if (cur_el < 2) {
6786         uint64_t hcr = arm_hcr_el2_eff(env);
6787 
6788         if (cur_el == 0) {
6789             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6790                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6791                     return CP_ACCESS_TRAP_EL2;
6792                 }
6793             } else {
6794                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6795                     return CP_ACCESS_TRAP;
6796                 }
6797                 if (hcr & HCR_TID2) {
6798                     return CP_ACCESS_TRAP_EL2;
6799                 }
6800             }
6801         } else if (hcr & HCR_TID2) {
6802             return CP_ACCESS_TRAP_EL2;
6803         }
6804     }
6809 
6810     return CP_ACCESS_OK;
6811 }
6812 
6813 /*
6814  * Check for traps to RAS registers, which are controlled
6815  * by HCR_EL2.TERR and SCR_EL3.TERR.
6816  */
6817 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
6818                                   bool isread)
6819 {
6820     int el = arm_current_el(env);
6821 
6822     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
6823         return CP_ACCESS_TRAP_EL2;
6824     }
6825     if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
6826         return CP_ACCESS_TRAP_EL3;
6827     }
6828     return CP_ACCESS_OK;
6829 }
6830 
6831 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
6832 {
6833     int el = arm_current_el(env);
6834 
6835     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6836         return env->cp15.vdisr_el2;
6837     }
6838     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6839         return 0; /* RAZ/WI */
6840     }
6841     return env->cp15.disr_el1;
6842 }
6843 
6844 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6845 {
6846     int el = arm_current_el(env);
6847 
6848     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6849         env->cp15.vdisr_el2 = val;
6850         return;
6851     }
6852     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6853         return; /* RAZ/WI */
6854     }
6855     env->cp15.disr_el1 = val;
6856 }
6857 
6858 /*
6859  * Minimal RAS implementation with no Error Records.
6860  * Which means that all of the Error Record registers:
6861  *   ERXADDR_EL1
6862  *   ERXCTLR_EL1
6863  *   ERXFR_EL1
6864  *   ERXMISC0_EL1
6865  *   ERXMISC1_EL1
6866  *   ERXMISC2_EL1
6867  *   ERXMISC3_EL1
6868  *   ERXPFGCDN_EL1  (RASv1p1)
6869  *   ERXPFGCTL_EL1  (RASv1p1)
6870  *   ERXPFGF_EL1    (RASv1p1)
6871  *   ERXSTATUS_EL1
6872  * and
6873  *   ERRSELR_EL1
6874  * may generate UNDEFINED, which is the effect we get by not
6875  * listing them at all.
6876  *
6877  * These registers have fine-grained trap bits, but UNDEF-to-EL1
6878  * is higher priority than FGT-to-EL2 so we do not need to list them
6879  * in order to check for an FGT.
6880  */
6881 static const ARMCPRegInfo minimal_ras_reginfo[] = {
6882     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
6883       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
6884       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
6885       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
6886     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
6887       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
6888       .access = PL1_R, .accessfn = access_terr,
6889       .fgt = FGT_ERRIDR_EL1,
6890       .type = ARM_CP_CONST, .resetvalue = 0 },
6891     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
6892       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
6893       .nv2_redirect_offset = 0x500,
6894       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
6895     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
6896       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
6897       .nv2_redirect_offset = 0x508,
6898       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
6899 };
6900 
6901 /*
6902  * Return the exception level to which exceptions should be taken
6903  * via SVEAccessTrap.  This excludes the check for whether the exception
6904  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
6905  * be found by testing 0 < fp_exception_el < sve_exception_el.
6906  *
6907  * Cf. the ARM pseudocode function CheckSVEEnabled.  Note that the
6908  * pseudocode does *not* separate out the FP trap checks, but has them
6909  * all in one function.
6910  */
6911 int sve_exception_el(CPUARMState *env, int el)
6912 {
6913 #ifndef CONFIG_USER_ONLY
6914     if (el <= 1 && !el_is_in_host(env, el)) {
6915         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
6916         case 1:
6917             if (el != 0) {
6918                 break;
6919             }
6920             /* fall through */
6921         case 0:
6922         case 2:
6923             return 1;
6924         }
6925     }
6926 
6927     if (el <= 2 && arm_is_el2_enabled(env)) {
6928         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6929         if (env->cp15.hcr_el2 & HCR_E2H) {
6930             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
6931             case 1:
6932                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6933                     break;
6934                 }
6935                 /* fall through */
6936             case 0:
6937             case 2:
6938                 return 2;
6939             }
6940         } else {
6941             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
6942                 return 2;
6943             }
6944         }
6945     }
6946 
6947     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
6948     if (arm_feature(env, ARM_FEATURE_EL3)
6949         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
6950         return 3;
6951     }
6952 #endif
6953     return 0;
6954 }
6955 
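/*
 * Illustrative sketch, not part of the build: the 2-bit ZEN/SMEN-style
 * enable fields tested above (and in sme_exception_el() below) encode a
 * small table: 0b00 and 0b10 trap all accesses, 0b01 traps EL0 only,
 * and 0b11 traps nothing.  The switch fall-throughs implement exactly
 * this table:
 */
#if 0
#include <stdbool.h>

static bool demo_zen_traps(unsigned int zen, unsigned int el)
{
    switch (zen & 3) {
    case 1:
        return el == 0;     /* enabled except at EL0 */
    case 3:
        return false;       /* fully enabled, no trap */
    default:
        return true;        /* 0 and 2: all accesses trap */
    }
}
#endif
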
6956 /*
6957  * Return the exception level to which exceptions should be taken for SME.
6958  * Cf. the ARM pseudocode function CheckSMEAccess.
6959  */
6960 int sme_exception_el(CPUARMState *env, int el)
6961 {
6962 #ifndef CONFIG_USER_ONLY
6963     if (el <= 1 && !el_is_in_host(env, el)) {
6964         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
6965         case 1:
6966             if (el != 0) {
6967                 break;
6968             }
6969             /* fall through */
6970         case 0:
6971         case 2:
6972             return 1;
6973         }
6974     }
6975 
6976     if (el <= 2 && arm_is_el2_enabled(env)) {
6977         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6978         if (env->cp15.hcr_el2 & HCR_E2H) {
6979             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
6980             case 1:
6981                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6982                     break;
6983                 }
6984                 /* fall through */
6985             case 0:
6986             case 2:
6987                 return 2;
6988             }
6989         } else {
6990             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
6991                 return 2;
6992             }
6993         }
6994     }
6995 
6996     /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
6997     if (arm_feature(env, ARM_FEATURE_EL3)
6998         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6999         return 3;
7000     }
7001 #endif
7002     return 0;
7003 }
7004 
7005 /*
7006  * Given that SVE is enabled, return the vector length for EL.
7007  */
7008 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
7009 {
7010     ARMCPU *cpu = env_archcpu(env);
7011     uint64_t *cr = env->vfp.zcr_el;
7012     uint32_t map = cpu->sve_vq.map;
7013     uint32_t len = ARM_MAX_VQ - 1;
7014 
7015     if (sm) {
7016         cr = env->vfp.smcr_el;
7017         map = cpu->sme_vq.map;
7018     }
7019 
7020     if (el <= 1 && !el_is_in_host(env, el)) {
7021         len = MIN(len, 0xf & (uint32_t)cr[1]);
7022     }
7023     if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
7024         len = MIN(len, 0xf & (uint32_t)cr[2]);
7025     }
7026     if (arm_feature(env, ARM_FEATURE_EL3)) {
7027         len = MIN(len, 0xf & (uint32_t)cr[3]);
7028     }
7029 
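         /*
          * For example (illustrative values): if the MIN chain leaves
          * len == 3 and map == 0b1011 (VQ 1, 2 and 4 supported), then
          * map & 0xf is 0b1011 and we return 31 - clz32(0b1011) = 3,
          * i.e. VQ 4: 512-bit vectors.
          */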
7030     map &= MAKE_64BIT_MASK(0, len + 1);
7031     if (map != 0) {
7032         return 31 - clz32(map);
7033     }
7034 
7035     /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
7036     assert(sm);
7037     return ctz32(cpu->sme_vq.map);
7038 }
7039 
7040 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
7041 {
7042     return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
7043 }
7044 
7045 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7046                       uint64_t value)
7047 {
7048     int cur_el = arm_current_el(env);
7049     int old_len = sve_vqm1_for_el(env, cur_el);
7050     int new_len;
7051 
7052     /* Bits other than [3:0] are RAZ/WI.  */
7053     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
7054     raw_write(env, ri, value & 0xf);
7055 
7056     /*
7057      * Because we arrived here, we know both FP and SVE are enabled;
7058      * otherwise we would have trapped access to the ZCR_ELn register.
7059      */
7060     new_len = sve_vqm1_for_el(env, cur_el);
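         /*
          * For example (illustrative): narrowing from VQ 4 to VQ 2
          * calls aarch64_sve_narrow_vq(env, 2), discarding the Z and P
          * register bits above the new 32-byte vector length.
          */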
7061     if (new_len < old_len) {
7062         aarch64_sve_narrow_vq(env, new_len + 1);
7063     }
7064 }
7065 
7066 static const ARMCPRegInfo zcr_reginfo[] = {
7067     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
7068       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
7069       .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
7070       .access = PL1_RW, .type = ARM_CP_SVE,
7071       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
7072       .writefn = zcr_write, .raw_writefn = raw_write },
7073     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
7074       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
7075       .access = PL2_RW, .type = ARM_CP_SVE,
7076       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
7077       .writefn = zcr_write, .raw_writefn = raw_write },
7078     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
7079       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
7080       .access = PL3_RW, .type = ARM_CP_SVE,
7081       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
7082       .writefn = zcr_write, .raw_writefn = raw_write },
7083 };
7084 
7085 #ifdef TARGET_AARCH64
7086 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
7087                                     bool isread)
7088 {
7089     int el = arm_current_el(env);
7090 
7091     if (el == 0) {
7092         uint64_t sctlr = arm_sctlr(env, el);
7093         if (!(sctlr & SCTLR_EnTP2)) {
7094             return CP_ACCESS_TRAP;
7095         }
7096     }
7097     /* TODO: FEAT_FGT */
7098     if (el < 3
7099         && arm_feature(env, ARM_FEATURE_EL3)
7100         && !(env->cp15.scr_el3 & SCR_ENTP2)) {
7101         return CP_ACCESS_TRAP_EL3;
7102     }
7103     return CP_ACCESS_OK;
7104 }
7105 
7106 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
7107                                       bool isread)
7108 {
7109     /* At EL1 this is a FEAT_NV access, so CPTR_EL3.ESM does not apply. */
7110     if (arm_current_el(env) == 2
7111         && arm_feature(env, ARM_FEATURE_EL3)
7112         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7113         return CP_ACCESS_TRAP_EL3;
7114     }
7115     return CP_ACCESS_OK;
7116 }
7117 
7118 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
7119                                    bool isread)
7120 {
7121     if (arm_current_el(env) < 3
7122         && arm_feature(env, ARM_FEATURE_EL3)
7123         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7124         return CP_ACCESS_TRAP_EL3;
7125     }
7126     return CP_ACCESS_OK;
7127 }
7128 
7129 /* ResetSVEState */
7130 static void arm_reset_sve_state(CPUARMState *env)
7131 {
7132     memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
7133     /* Recall that FFR is stored as pregs[16]. */
7134     memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
7135     vfp_set_fpsr(env, 0x0800009f); /* ResetSVEState sets FPSR, not FPCR */
7136 }
7137 
7138 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
7139 {
7140     uint64_t change = (env->svcr ^ new) & mask;
7141 
7142     if (change == 0) {
7143         return;
7144     }
7145     env->svcr ^= change;
7146 
7147     if (change & R_SVCR_SM_MASK) {
7148         arm_reset_sve_state(env);
7149     }
7150 
7151     /*
7152      * ResetSMEState.
7153      *
7154      * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
7155      * on enable: while disabled, the storage is inaccessible and the
7156      * value does not matter.  We're not saving the storage in vmstate
7157      * when disabled either.
7158      */
7159     if (change & new & R_SVCR_ZA_MASK) {
7160         memset(env->zarray, 0, sizeof(env->zarray));
7161     }
7162 
7163     if (tcg_enabled()) {
7164         arm_rebuild_hflags(env);
7165     }
7166 }
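     /*
      * For example (illustrative): an MSR to SVCR that sets SM flips
      * the SM bit here and so calls arm_reset_sve_state(), while ZA
      * storage is zeroed only on the 0 -> 1 transition, per the
      * comment above.
      */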
7167 
7168 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7169                        uint64_t value)
7170 {
7171     aarch64_set_svcr(env, value, -1);
7172 }
7173 
7174 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7175                        uint64_t value)
7176 {
7177     int cur_el = arm_current_el(env);
7178     int old_len = sve_vqm1_for_el(env, cur_el);
7179     int new_len;
7180 
7181     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
7182     value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
7183     raw_write(env, ri, value);
7184 
7185     /*
7186      * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
7187      * when SVL is widened (old values kept, or zeros).  Choose to keep the
7188      * current values for simplicity.  But for QEMU internals, we must still
7189      * apply the narrower SVL to the Zregs and Pregs -- see the comment
7190      * above aarch64_sve_narrow_vq.
7191      */
7192     new_len = sve_vqm1_for_el(env, cur_el);
7193     if (new_len < old_len) {
7194         aarch64_sve_narrow_vq(env, new_len + 1);
7195     }
7196 }
7197 
7198 static const ARMCPRegInfo sme_reginfo[] = {
7199     { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
7200       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
7201       .access = PL0_RW, .accessfn = access_tpidr2,
7202       .fgt = FGT_NTPIDR2_EL0,
7203       .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
7204     { .name = "SVCR", .state = ARM_CP_STATE_AA64,
7205       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
7206       .access = PL0_RW, .type = ARM_CP_SME,
7207       .fieldoffset = offsetof(CPUARMState, svcr),
7208       .writefn = svcr_write, .raw_writefn = raw_write },
7209     { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
7210       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
7211       .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
7212       .access = PL1_RW, .type = ARM_CP_SME,
7213       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
7214       .writefn = smcr_write, .raw_writefn = raw_write },
7215     { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
7216       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
7217       .access = PL2_RW, .type = ARM_CP_SME,
7218       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
7219       .writefn = smcr_write, .raw_writefn = raw_write },
7220     { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
7221       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
7222       .access = PL3_RW, .type = ARM_CP_SME,
7223       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
7224       .writefn = smcr_write, .raw_writefn = raw_write },
7225     { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
7226       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
7227       .access = PL1_R, .accessfn = access_aa64_tid1,
7228       /*
7229        * IMPLEMENTOR = 0 (software)
7230        * REVISION    = 0 (implementation defined)
7231        * SMPS        = 0 (no streaming execution priority in QEMU)
7232        * AFFINITY    = 0 (streaming SVE mode not shared with other PEs)
7233        */
7234       .type = ARM_CP_CONST, .resetvalue = 0, },
7235     /*
7236      * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
7237      */
7238     { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
7239       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
7240       .access = PL1_RW, .accessfn = access_smpri,
7241       .fgt = FGT_NSMPRI_EL1,
7242       .type = ARM_CP_CONST, .resetvalue = 0 },
7243     { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
7244       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
7245       .nv2_redirect_offset = 0x1f8,
7246       .access = PL2_RW, .accessfn = access_smprimap,
7247       .type = ARM_CP_CONST, .resetvalue = 0 },
7248 };
7249 
7250 static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
7251                                   uint64_t value)
7252 {
7253     CPUState *cs = env_cpu(env);
7254 
7255     tlb_flush(cs);
7256 }
7257 
7258 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7259                         uint64_t value)
7260 {
7261     /* L0GPTSZ is RO; other bits not mentioned are RES0. */
7262     uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
7263         R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
7264         R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
7265 
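         /*
          * Read-modify-write: the RES0 bits and the read-only L0GPTSZ
          * field (seeded by gpccr_reset() below) keep their current
          * values.
          */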
7266     env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
7267 }
7268 
7269 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
7270 {
7271     env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
7272                                      env_archcpu(env)->reset_l0gptsz);
7273 }
7274 
7275 static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
7276                                     uint64_t value)
7277 {
7278     CPUState *cs = env_cpu(env);
7279 
7280     tlb_flush_all_cpus_synced(cs);
7281 }
7282 
7283 static const ARMCPRegInfo rme_reginfo[] = {
7284     { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
7285       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
7286       .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
7287       .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
7288     { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
7289       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
7290       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
7291     { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
7292       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
7293       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
7294     { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
7295       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
7296       .access = PL3_W, .type = ARM_CP_NO_RAW,
7297       .writefn = tlbi_aa64_paall_write },
7298     { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
7299       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
7300       .access = PL3_W, .type = ARM_CP_NO_RAW,
7301       .writefn = tlbi_aa64_paallos_write },
7302     /*
7303      * QEMU does not have a way to invalidate by physical address, thus
7304      * invalidating a range of physical addresses is accomplished by
7305      * flushing all tlb entries in the outer shareable domain,
7306      * just like PAALLOS.
7307      */
7308     { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
7309       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
7310       .access = PL3_W, .type = ARM_CP_NO_RAW,
7311       .writefn = tlbi_aa64_paallos_write },
7312     { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
7313       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
7314       .access = PL3_W, .type = ARM_CP_NO_RAW,
7315       .writefn = tlbi_aa64_paallos_write },
7316     { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
7317       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
7318       .access = PL3_W, .type = ARM_CP_NOP },
7319 };
7320 
7321 static const ARMCPRegInfo rme_mte_reginfo[] = {
7322     { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
7323       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
7324       .access = PL3_W, .type = ARM_CP_NOP },
7325 };
7326 #endif /* TARGET_AARCH64 */
7327 
7328 static void define_pmu_regs(ARMCPU *cpu)
7329 {
7330     /*
7331      * v7 performance monitor control register: same implementor
7332      * field as main ID register, and we implement pmu_num_counters()
7333      * event counters in addition to the cycle count register.
7334      */
7335     unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
7336     ARMCPRegInfo pmcr = {
7337         .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
7338         .access = PL0_RW,
7339         .fgt = FGT_PMCR_EL0,
7340         .type = ARM_CP_IO | ARM_CP_ALIAS,
7341         .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
7342         .accessfn = pmreg_access,
7343         .readfn = pmcr_read, .raw_readfn = raw_read,
7344         .writefn = pmcr_write, .raw_writefn = raw_write,
7345     };
7346     ARMCPRegInfo pmcr64 = {
7347         .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
7348         .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
7349         .access = PL0_RW, .accessfn = pmreg_access,
7350         .fgt = FGT_PMCR_EL0,
7351         .type = ARM_CP_IO,
7352         .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
7353         .resetvalue = cpu->isar.reset_pmcr_el0,
7354         .readfn = pmcr_read, .raw_readfn = raw_read,
7355         .writefn = pmcr_write, .raw_writefn = raw_write,
7356     };
7357 
7358     define_one_arm_cp_reg(cpu, &pmcr);
7359     define_one_arm_cp_reg(cpu, &pmcr64);
7360     for (i = 0; i < pmcrn; i++) {
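             /*
              * PMEVCNTR<n> and PMEVTYPER<n> encode n across CRm and
              * opc2: CRm = 0b10:n[4:3] for the counters, 0b11:n[4:3]
              * for the event types, opc2 = n[2:0] for both.  A worked
              * example (the expressions below are authoritative):
              * n == 13 gives CRm = 9, opc2 = 5 for PMEVCNTR13.
              */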
7361         char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
7362         char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
7363         char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
7364         char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
7365         ARMCPRegInfo pmev_regs[] = {
7366             { .name = pmevcntr_name, .cp = 15, .crn = 14,
7367               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7368               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7369               .fgt = FGT_PMEVCNTRN_EL0,
7370               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7371               .accessfn = pmreg_access_xevcntr },
7372             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
7373               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
7374               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
7375               .type = ARM_CP_IO,
7376               .fgt = FGT_PMEVCNTRN_EL0,
7377               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7378               .raw_readfn = pmevcntr_rawread,
7379               .raw_writefn = pmevcntr_rawwrite },
7380             { .name = pmevtyper_name, .cp = 15, .crn = 14,
7381               .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7382               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7383               .fgt = FGT_PMEVTYPERN_EL0,
7384               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7385               .accessfn = pmreg_access },
7386             { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
7387               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
7388               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
7389               .fgt = FGT_PMEVTYPERN_EL0,
7390               .type = ARM_CP_IO,
7391               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7392               .raw_writefn = pmevtyper_rawwrite },
7393         };
7394         define_arm_cp_regs(cpu, pmev_regs);
7395         g_free(pmevcntr_name);
7396         g_free(pmevcntr_el0_name);
7397         g_free(pmevtyper_name);
7398         g_free(pmevtyper_el0_name);
7399     }
7400     if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
7401         ARMCPRegInfo v81_pmu_regs[] = {
7402             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
7403               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
7404               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7405               .fgt = FGT_PMCEIDN_EL0,
7406               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
7407             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
7408               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
7409               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7410               .fgt = FGT_PMCEIDN_EL0,
7411               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
7412         };
7413         define_arm_cp_regs(cpu, v81_pmu_regs);
7414     }
7415     if (cpu_isar_feature(any_pmuv3p4, cpu)) {
7416         static const ARMCPRegInfo v84_pmmir = {
7417             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
7418             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
7419             .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7420             .fgt = FGT_PMMIR_EL1,
7421             .resetvalue = 0
7422         };
7423         define_one_arm_cp_reg(cpu, &v84_pmmir);
7424     }
7425 }
7426 
7427 #ifndef CONFIG_USER_ONLY
7428 /*
7429  * We don't know until after realize whether there's a GICv3
7430  * attached, and that is what registers the gicv3 sysregs.
7431  * So we have to fill in the GIC fields of ID_PFR1/ID_PFR1_EL1 and
7432  * ID_AA64PFR0_EL1 at runtime.
7433  */
7434 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
7435 {
7436     ARMCPU *cpu = env_archcpu(env);
7437     uint64_t pfr1 = cpu->isar.id_pfr1;
7438 
7439     if (env->gicv3state) {
7440         pfr1 |= 1 << 28;
7441     }
7442     return pfr1;
7443 }
7444 
7445 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
7446 {
7447     ARMCPU *cpu = env_archcpu(env);
7448     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
7449 
7450     if (env->gicv3state) {
7451         pfr0 |= 1 << 24;
7452     }
7453     return pfr0;
7454 }
7455 #endif
7456 
7457 /*
7458  * Shared logic between LORID and the rest of the LOR* registers.
7459  * Secure state exclusion has already been dealt with.
7460  */
7461 static CPAccessResult access_lor_ns(CPUARMState *env,
7462                                     const ARMCPRegInfo *ri, bool isread)
7463 {
7464     int el = arm_current_el(env);
7465 
7466     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
7467         return CP_ACCESS_TRAP_EL2;
7468     }
7469     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
7470         return CP_ACCESS_TRAP_EL3;
7471     }
7472     return CP_ACCESS_OK;
7473 }
7474 
7475 static CPAccessResult access_lor_other(CPUARMState *env,
7476                                        const ARMCPRegInfo *ri, bool isread)
7477 {
7478     if (arm_is_secure_below_el3(env)) {
7479         /* Access denied in secure mode.  */
7480         return CP_ACCESS_TRAP;
7481     }
7482     return access_lor_ns(env, ri, isread);
7483 }
7484 
7485 /*
7486  * A trivial implementation of ARMv8.1-LOR leaves all of these
7487  * registers fixed at 0, which indicates that there are zero
7488  * supported Limited Ordering regions.
7489  */
7490 static const ARMCPRegInfo lor_reginfo[] = {
7491     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
7492       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
7493       .access = PL1_RW, .accessfn = access_lor_other,
7494       .fgt = FGT_LORSA_EL1,
7495       .type = ARM_CP_CONST, .resetvalue = 0 },
7496     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
7497       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
7498       .access = PL1_RW, .accessfn = access_lor_other,
7499       .fgt = FGT_LOREA_EL1,
7500       .type = ARM_CP_CONST, .resetvalue = 0 },
7501     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
7502       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
7503       .access = PL1_RW, .accessfn = access_lor_other,
7504       .fgt = FGT_LORN_EL1,
7505       .type = ARM_CP_CONST, .resetvalue = 0 },
7506     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
7507       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
7508       .access = PL1_RW, .accessfn = access_lor_other,
7509       .fgt = FGT_LORC_EL1,
7510       .type = ARM_CP_CONST, .resetvalue = 0 },
7511     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
7512       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
7513       .access = PL1_R, .accessfn = access_lor_ns,
7514       .fgt = FGT_LORID_EL1,
7515       .type = ARM_CP_CONST, .resetvalue = 0 },
7516 };
7517 
7518 #ifdef TARGET_AARCH64
7519 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
7520                                    bool isread)
7521 {
7522     int el = arm_current_el(env);
7523 
7524     if (el < 2 &&
7525         arm_is_el2_enabled(env) &&
7526         !(arm_hcr_el2_eff(env) & HCR_APK)) {
7527         return CP_ACCESS_TRAP_EL2;
7528     }
7529     if (el < 3 &&
7530         arm_feature(env, ARM_FEATURE_EL3) &&
7531         !(env->cp15.scr_el3 & SCR_APK)) {
7532         return CP_ACCESS_TRAP_EL3;
7533     }
7534     return CP_ACCESS_OK;
7535 }
7536 
7537 static const ARMCPRegInfo pauth_reginfo[] = {
7538     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7539       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
7540       .access = PL1_RW, .accessfn = access_pauth,
7541       .fgt = FGT_APDAKEY,
7542       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
7543     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7544       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
7545       .access = PL1_RW, .accessfn = access_pauth,
7546       .fgt = FGT_APDAKEY,
7547       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
7548     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7549       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
7550       .access = PL1_RW, .accessfn = access_pauth,
7551       .fgt = FGT_APDBKEY,
7552       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
7553     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7554       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
7555       .access = PL1_RW, .accessfn = access_pauth,
7556       .fgt = FGT_APDBKEY,
7557       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
7558     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7559       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
7560       .access = PL1_RW, .accessfn = access_pauth,
7561       .fgt = FGT_APGAKEY,
7562       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
7563     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7564       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
7565       .access = PL1_RW, .accessfn = access_pauth,
7566       .fgt = FGT_APGAKEY,
7567       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
7568     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7569       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
7570       .access = PL1_RW, .accessfn = access_pauth,
7571       .fgt = FGT_APIAKEY,
7572       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
7573     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7574       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
7575       .access = PL1_RW, .accessfn = access_pauth,
7576       .fgt = FGT_APIAKEY,
7577       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
7578     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7579       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
7580       .access = PL1_RW, .accessfn = access_pauth,
7581       .fgt = FGT_APIBKEY,
7582       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
7583     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7584       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
7585       .access = PL1_RW, .accessfn = access_pauth,
7586       .fgt = FGT_APIBKEY,
7587       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
7588 };
7589 
7590 static const ARMCPRegInfo tlbirange_reginfo[] = {
7591     { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
7592       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
7593       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7594       .fgt = FGT_TLBIRVAE1IS,
7595       .writefn = tlbi_aa64_rvae1is_write },
7596     { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
7597       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
7598       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7599       .fgt = FGT_TLBIRVAAE1IS,
7600       .writefn = tlbi_aa64_rvae1is_write },
7601     { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
7602       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
7603       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7604       .fgt = FGT_TLBIRVALE1IS,
7605       .writefn = tlbi_aa64_rvae1is_write },
7606     { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
7607       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
7608       .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7609       .fgt = FGT_TLBIRVAALE1IS,
7610       .writefn = tlbi_aa64_rvae1is_write },
7611     { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
7612       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
7613       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7614       .fgt = FGT_TLBIRVAE1OS,
7615       .writefn = tlbi_aa64_rvae1is_write },
7616     { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
7617       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
7618       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7619       .fgt = FGT_TLBIRVAAE1OS,
7620       .writefn = tlbi_aa64_rvae1is_write },
7621     { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
7622       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
7623       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7624       .fgt = FGT_TLBIRVALE1OS,
7625       .writefn = tlbi_aa64_rvae1is_write },
7626     { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
7627       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
7628       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7629       .fgt = FGT_TLBIRVAALE1OS,
7630       .writefn = tlbi_aa64_rvae1is_write },
7631     { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
7632       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
7633       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7634       .fgt = FGT_TLBIRVAE1,
7635       .writefn = tlbi_aa64_rvae1_write },
7636     { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
7637       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
7638       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7639       .fgt = FGT_TLBIRVAAE1,
7640       .writefn = tlbi_aa64_rvae1_write },
7641     { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
7642       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
7643       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7644       .fgt = FGT_TLBIRVALE1,
7645       .writefn = tlbi_aa64_rvae1_write },
7646     { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
7647       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
7648       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7649       .fgt = FGT_TLBIRVAALE1,
7650       .writefn = tlbi_aa64_rvae1_write },
7651     { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
7652       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
7653       .access = PL2_W, .type = ARM_CP_NO_RAW,
7654       .writefn = tlbi_aa64_ripas2e1is_write },
7655     { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
7656       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
7657       .access = PL2_W, .type = ARM_CP_NO_RAW,
7658       .writefn = tlbi_aa64_ripas2e1is_write },
7659     { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
7660       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
7661       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7662       .writefn = tlbi_aa64_rvae2is_write },
7663     { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
7664       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
7665       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7666       .writefn = tlbi_aa64_rvae2is_write },
7667     { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
7668       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
7669       .access = PL2_W, .type = ARM_CP_NO_RAW,
7670       .writefn = tlbi_aa64_ripas2e1_write },
7671     { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
7672       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
7673       .access = PL2_W, .type = ARM_CP_NO_RAW,
7674       .writefn = tlbi_aa64_ripas2e1_write },
7675     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
7676       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
7677       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7678       .writefn = tlbi_aa64_rvae2is_write },
7679     { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
7680       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
7681       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7682       .writefn = tlbi_aa64_rvae2is_write },
7683     { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
7684       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
7685       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7686       .writefn = tlbi_aa64_rvae2_write },
7687     { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
7688       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
7689       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7690       .writefn = tlbi_aa64_rvae2_write },
7691     { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
7692       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
7693       .access = PL3_W, .type = ARM_CP_NO_RAW,
7694       .writefn = tlbi_aa64_rvae3is_write },
7695     { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
7696       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
7697       .access = PL3_W, .type = ARM_CP_NO_RAW,
7698       .writefn = tlbi_aa64_rvae3is_write },
7699     { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
7700       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
7701       .access = PL3_W, .type = ARM_CP_NO_RAW,
7702       .writefn = tlbi_aa64_rvae3is_write },
7703     { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
7704       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
7705       .access = PL3_W, .type = ARM_CP_NO_RAW,
7706       .writefn = tlbi_aa64_rvae3is_write },
7707     { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
7708       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
7709       .access = PL3_W, .type = ARM_CP_NO_RAW,
7710       .writefn = tlbi_aa64_rvae3_write },
7711     { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
7712       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
7713       .access = PL3_W, .type = ARM_CP_NO_RAW,
7714       .writefn = tlbi_aa64_rvae3_write },
7715 };
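
     /*
      * Note that the *IS and *OS range invalidations above share write
      * functions: QEMU implements broadcast TLB maintenance as a synced
      * flush on all CPUs, so inner- and outer-shareable invalidation
      * behave identically here.
      */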
7716 
7717 static const ARMCPRegInfo tlbios_reginfo[] = {
7718     { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
7719       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
7720       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7721       .fgt = FGT_TLBIVMALLE1OS,
7722       .writefn = tlbi_aa64_vmalle1is_write },
7723     { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
7724       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
7725       .fgt = FGT_TLBIVAE1OS,
7726       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7727       .writefn = tlbi_aa64_vae1is_write },
7728     { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
7729       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
7730       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7731       .fgt = FGT_TLBIASIDE1OS,
7732       .writefn = tlbi_aa64_vmalle1is_write },
7733     { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
7734       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
7735       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7736       .fgt = FGT_TLBIVAAE1OS,
7737       .writefn = tlbi_aa64_vae1is_write },
7738     { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
7739       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
7740       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7741       .fgt = FGT_TLBIVALE1OS,
7742       .writefn = tlbi_aa64_vae1is_write },
7743     { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
7744       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
7745       .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7746       .fgt = FGT_TLBIVAALE1OS,
7747       .writefn = tlbi_aa64_vae1is_write },
7748     { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
7749       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
7750       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7751       .writefn = tlbi_aa64_alle2is_write },
7752     { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
7753       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
7754       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7755       .writefn = tlbi_aa64_vae2is_write },
7756     { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
7757       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
7758       .access = PL2_W, .type = ARM_CP_NO_RAW,
7759       .writefn = tlbi_aa64_alle1is_write },
7760     { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
7761       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
7762       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7763       .writefn = tlbi_aa64_vae2is_write },
7764     { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
7765       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
7766       .access = PL2_W, .type = ARM_CP_NO_RAW,
7767       .writefn = tlbi_aa64_alle1is_write },
7768     { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
7769       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
7770       .access = PL2_W, .type = ARM_CP_NOP },
7771     { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
7772       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
7773       .access = PL2_W, .type = ARM_CP_NOP },
7774     { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7775       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
7776       .access = PL2_W, .type = ARM_CP_NOP },
7777     { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7778       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
7779       .access = PL2_W, .type = ARM_CP_NOP },
7780     { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
7781       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
7782       .access = PL3_W, .type = ARM_CP_NO_RAW,
7783       .writefn = tlbi_aa64_alle3is_write },
7784     { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
7785       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
7786       .access = PL3_W, .type = ARM_CP_NO_RAW,
7787       .writefn = tlbi_aa64_vae3is_write },
7788     { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
7789       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
7790       .access = PL3_W, .type = ARM_CP_NO_RAW,
7791       .writefn = tlbi_aa64_vae3is_write },
7792 };
7793 
7794 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
7795 {
7796     Error *err = NULL;
7797     uint64_t ret;
7798 
7799     /* Success sets NZCV = 0000.  */
7800     env->NF = env->CF = env->VF = 0, env->ZF = 1;
7801 
7802     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
7803         /*
7804          * ??? Failed for unknown reasons in the crypto subsystem.
7805          * The best we can do is log the reason and return the
7806          * timed-out indication to the guest.  There is no reason
7807          * we know of to expect this failure to be transitory, so the
7808          * guest may well hang retrying the operation.
7809          */
7810         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
7811                       ri->name, error_get_pretty(err));
7812         error_free(err);
7813 
7814         env->ZF = 0; /* NZCV = 0100 */
7815         return 0;
7816     }
7817     return ret;
7818 }
7819 
7820 /* We do not support re-seeding, so the two registers operate the same.  */
7821 static const ARMCPRegInfo rndr_reginfo[] = {
7822     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
7823       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7824       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
7825       .access = PL0_R, .readfn = rndr_readfn },
7826     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
7827       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
7828       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
7829       .access = PL0_R, .readfn = rndr_readfn },
7830 };
7831 
7832 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
7833                           uint64_t value)
7834 {
7835 #ifdef CONFIG_TCG
7836     ARMCPU *cpu = env_archcpu(env);
7837     /* CTR_EL0 System register -> DminLine, bits [19:16] */
7838     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
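         /*
          * DminLine is the log2 of the smallest data cache line size in
          * 4-byte words, so e.g. DminLine == 4 (illustrative) gives
          * dline_size = 4 << 4 = 64 bytes; vaddr below is the input
          * address rounded down to that line size.
          */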
7839     uint64_t vaddr_in = (uint64_t) value;
7840     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
7841     void *haddr;
7842     int mem_idx = cpu_mmu_index(env, false);
7843 
7844     /* This won't cross a page boundary. */
7845     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
7846     if (haddr) {
7847 #ifndef CONFIG_USER_ONLY
7848 
7849         ram_addr_t offset;
7850         MemoryRegion *mr;
7851 
7852         /* RCU lock is already being held */
7853         mr = memory_region_from_host(haddr, &offset);
7854 
7855         if (mr) {
7856             memory_region_writeback(mr, offset, dline_size);
7857         }
7858 #endif /* !CONFIG_USER_ONLY */
7859     }
7860 #else
7861     /* Handled by hardware accelerator. */
7862     g_assert_not_reached();
7863 #endif /* CONFIG_TCG */
7864 }
7865 
7866 static const ARMCPRegInfo dcpop_reg[] = {
7867     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
7868       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
7869       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7870       .fgt = FGT_DCCVAP,
7871       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7872 };
7873 
7874 static const ARMCPRegInfo dcpodp_reg[] = {
7875     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
7876       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
7877       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
7878       .fgt = FGT_DCCVADP,
7879       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
7880 };
7881 
7882 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
7883                                        bool isread)
7884 {
7885     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
7886         return CP_ACCESS_TRAP_EL2;
7887     }
7888 
7889     return CP_ACCESS_OK;
7890 }
7891 
7892 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
7893                                  bool isread)
7894 {
7895     int el = arm_current_el(env);
7896     if (el < 2 && arm_is_el2_enabled(env)) {
7897         uint64_t hcr = arm_hcr_el2_eff(env);
7898         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7899             return CP_ACCESS_TRAP_EL2;
7900         }
7901     }
7902     if (el < 3 &&
7903         arm_feature(env, ARM_FEATURE_EL3) &&
7904         !(env->cp15.scr_el3 & SCR_ATA)) {
7905         return CP_ACCESS_TRAP_EL3;
7906     }
7907     return CP_ACCESS_OK;
7908 }
7909 
7910 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
7911                                       bool isread)
7912 {
7913     CPAccessResult nv1 = access_nv1(env, ri, isread);
7914 
7915     if (nv1 != CP_ACCESS_OK) {
7916         return nv1;
7917     }
7918     return access_mte(env, ri, isread);
7919 }
7920 
7921 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
7922                                       bool isread)
7923 {
7924     /*
7925      * TFSR_EL2: similar to generic access_mte(), but we need to
7926      * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
7927      * if NV2 is enabled then we will redirect this to TFSR_EL1
7928      * after doing the HCR and SCR ATA traps; otherwise this will
7929      * be a trap to EL2 and the HCR/SCR traps do not apply.
7930      */
7931     int el = arm_current_el(env);
7932 
7933     if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
7934         return CP_ACCESS_OK;
7935     }
7936     if (el < 2 && arm_is_el2_enabled(env)) {
7937         uint64_t hcr = arm_hcr_el2_eff(env);
7938         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7939             return CP_ACCESS_TRAP_EL2;
7940         }
7941     }
7942     if (el < 3 &&
7943         arm_feature(env, ARM_FEATURE_EL3) &&
7944         !(env->cp15.scr_el3 & SCR_ATA)) {
7945         return CP_ACCESS_TRAP_EL3;
7946     }
7947     return CP_ACCESS_OK;
7948 }
7949 
7950 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
7951 {
7952     return env->pstate & PSTATE_TCO;
7953 }
7954 
7955 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7956 {
7957     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7958 }
7959 
7960 static const ARMCPRegInfo mte_reginfo[] = {
7961     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7962       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
7963       .access = PL1_RW, .accessfn = access_mte,
7964       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
7965     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7966       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
7967       .access = PL1_RW, .accessfn = access_tfsr_el1,
7968       .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
7969       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
7970     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7971       .type = ARM_CP_NV2_REDIRECT,
7972       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
7973       .access = PL2_RW, .accessfn = access_tfsr_el2,
7974       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
7975     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7976       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
7977       .access = PL3_RW,
7978       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
7979     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7980       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
7981       .access = PL1_RW, .accessfn = access_mte,
7982       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
7983     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7984       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
7985       .access = PL1_RW, .accessfn = access_mte,
7986       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
7987     { .name = "TCO", .state = ARM_CP_STATE_AA64,
7988       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7989       .type = ARM_CP_NO_RAW,
7990       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
7991     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7992       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
7993       .type = ARM_CP_NOP, .access = PL1_W,
7994       .fgt = FGT_DCIVAC,
7995       .accessfn = aa64_cacheop_poc_access },
7996     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7997       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
7998       .fgt = FGT_DCISW,
7999       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8000     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
8001       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
8002       .type = ARM_CP_NOP, .access = PL1_W,
8003       .fgt = FGT_DCIVAC,
8004       .accessfn = aa64_cacheop_poc_access },
8005     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
8006       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
8007       .fgt = FGT_DCISW,
8008       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8009     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
8010       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
8011       .fgt = FGT_DCCSW,
8012       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8013     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
8014       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
8015       .fgt = FGT_DCCSW,
8016       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8017     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
8018       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
8019       .fgt = FGT_DCCISW,
8020       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8021     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
8022       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
8023       .fgt = FGT_DCCISW,
8024       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8025 };
8026 
8027 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
8028     { .name = "TCO", .state = ARM_CP_STATE_AA64,
8029       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
8030       .type = ARM_CP_CONST, .access = PL0_RW, },
8031 };
8032 
8033 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
8034     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
8035       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
8036       .type = ARM_CP_NOP, .access = PL0_W,
8037       .fgt = FGT_DCCVAC,
8038       .accessfn = aa64_cacheop_poc_access },
8039     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
8040       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
8041       .type = ARM_CP_NOP, .access = PL0_W,
8042       .fgt = FGT_DCCVAC,
8043       .accessfn = aa64_cacheop_poc_access },
8044     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
8045       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
8046       .type = ARM_CP_NOP, .access = PL0_W,
8047       .fgt = FGT_DCCVAP,
8048       .accessfn = aa64_cacheop_poc_access },
8049     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
8050       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
8051       .type = ARM_CP_NOP, .access = PL0_W,
8052       .fgt = FGT_DCCVAP,
8053       .accessfn = aa64_cacheop_poc_access },
8054     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
8055       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
8056       .type = ARM_CP_NOP, .access = PL0_W,
8057       .fgt = FGT_DCCVADP,
8058       .accessfn = aa64_cacheop_poc_access },
8059     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
8060       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
8061       .type = ARM_CP_NOP, .access = PL0_W,
8062       .fgt = FGT_DCCVADP,
8063       .accessfn = aa64_cacheop_poc_access },
8064     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
8065       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
8066       .type = ARM_CP_NOP, .access = PL0_W,
8067       .fgt = FGT_DCCIVAC,
8068       .accessfn = aa64_cacheop_poc_access },
8069     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
8070       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
8071       .type = ARM_CP_NOP, .access = PL0_W,
8072       .fgt = FGT_DCCIVAC,
8073       .accessfn = aa64_cacheop_poc_access },
8074     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
8075       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
8076       .access = PL0_W, .type = ARM_CP_DC_GVA,
8077 #ifndef CONFIG_USER_ONLY
8078       /* Avoid overhead of an access check that always passes in user-mode */
8079       .accessfn = aa64_zva_access,
8080       .fgt = FGT_DCZVA,
8081 #endif
8082     },
8083     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
8084       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
8085       .access = PL0_W, .type = ARM_CP_DC_GZVA,
8086 #ifndef CONFIG_USER_ONLY
8087       /* Avoid overhead of an access check that always passes in user-mode */
8088       .accessfn = aa64_zva_access,
8089       .fgt = FGT_DCZVA,
8090 #endif
8091     },
8092 };
8093 
8094 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
8095                                      bool isread)
8096 {
8097     uint64_t hcr = arm_hcr_el2_eff(env);
8098     int el = arm_current_el(env);
8099 
8100     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
8101         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
8102             if (hcr & HCR_TGE) {
8103                 return CP_ACCESS_TRAP_EL2;
8104             }
8105             return CP_ACCESS_TRAP;
8106         }
8107     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
8108         return CP_ACCESS_TRAP_EL2;
8109     }
8110     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
8111         return CP_ACCESS_TRAP_EL2;
8112     }
8113     if (el < 3
8114         && arm_feature(env, ARM_FEATURE_EL3)
8115         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
8116         return CP_ACCESS_TRAP_EL3;
8117     }
8118     return CP_ACCESS_OK;
8119 }
8120 
8121 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
8122                                          const ARMCPRegInfo *ri,
8123                                          bool isread)
8124 {
8125     CPAccessResult nv1 = access_nv1(env, ri, isread);
8126 
8127     if (nv1 != CP_ACCESS_OK) {
8128         return nv1;
8129     }
8130     return access_scxtnum(env, ri, isread);
8131 }
8132 
8133 static const ARMCPRegInfo scxtnum_reginfo[] = {
8134     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
8135       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
8136       .access = PL0_RW, .accessfn = access_scxtnum,
8137       .fgt = FGT_SCXTNUM_EL0,
8138       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
8139     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
8140       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
8141       .access = PL1_RW, .accessfn = access_scxtnum_el1,
8142       .fgt = FGT_SCXTNUM_EL1,
8143       .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
8144       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
8145     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
8146       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
8147       .access = PL2_RW, .accessfn = access_scxtnum,
8148       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
8149     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
8150       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
8151       .access = PL3_RW,
8152       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
8153 };
8154 
8155 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
8156                                  bool isread)
8157 {
8158     if (arm_current_el(env) == 2 &&
8159         arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
8160         return CP_ACCESS_TRAP_EL3;
8161     }
8162     return CP_ACCESS_OK;
8163 }
8164 
8165 static const ARMCPRegInfo fgt_reginfo[] = {
8166     { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8167       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
8168       .nv2_redirect_offset = 0x1b8,
8169       .access = PL2_RW, .accessfn = access_fgt,
8170       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
8171     { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8172       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
8173       .nv2_redirect_offset = 0x1c0,
8174       .access = PL2_RW, .accessfn = access_fgt,
8175       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
8176     { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8177       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
8178       .nv2_redirect_offset = 0x1d0,
8179       .access = PL2_RW, .accessfn = access_fgt,
8180       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
8181     { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8182       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
8183       .nv2_redirect_offset = 0x1d8,
8184       .access = PL2_RW, .accessfn = access_fgt,
8185       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
8186     { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
8187       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
8188       .nv2_redirect_offset = 0x1c8,
8189       .access = PL2_RW, .accessfn = access_fgt,
8190       .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
8191 };
8192 
8193 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
8194                        uint64_t value)
8195 {
8196     /*
8197      * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
8198      * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
8199      * about the RESS bits at the top -- we choose the "generate an EL2
8200      * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
8201      * the ptw.c code detect the resulting invalid address).
8202      */
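         /*
          * For example (illustrative): a write of 0x40001234 stores
          * 0x40001000, so VNCR_EL2 plus any 8-byte-aligned redirect
          * offset below 0x1000 stays 64-bit aligned.
          */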
8203     env->cp15.vncr_el2 = value & ~0xfffULL;
8204 }
8205 
8206 static const ARMCPRegInfo nv2_reginfo[] = {
8207     { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
8208       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
8209       .access = PL2_RW,
8210       .writefn = vncr_write,
8211       .nv2_redirect_offset = 0xb0,
8212       .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
8213 };
8214 
8215 #endif /* TARGET_AARCH64 */
8216 
8217 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
8218                                      bool isread)
8219 {
8220     int el = arm_current_el(env);
8221 
8222     if (el == 0) {
8223         uint64_t sctlr = arm_sctlr(env, el);
8224         if (!(sctlr & SCTLR_EnRCTX)) {
8225             return CP_ACCESS_TRAP;
8226         }
8227     } else if (el == 1) {
8228         uint64_t hcr = arm_hcr_el2_eff(env);
8229         if (hcr & HCR_NV) {
8230             return CP_ACCESS_TRAP_EL2;
8231         }
8232     }
8233     return CP_ACCESS_OK;
8234 }
8235 
8236 static const ARMCPRegInfo predinv_reginfo[] = {
8237     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
8238       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
8239       .fgt = FGT_CFPRCTX,
8240       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8241     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
8242       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
8243       .fgt = FGT_DVPRCTX,
8244       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8245     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
8246       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
8247       .fgt = FGT_CPPRCTX,
8248       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8249     /*
8250      * Note the AArch32 opcodes have a different OPC1.
8251      */
8252     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
8253       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
8254       .fgt = FGT_CFPRCTX,
8255       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8256     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
8257       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
8258       .fgt = FGT_DVPRCTX,
8259       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8260     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
8261       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
8262       .fgt = FGT_CPPRCTX,
8263       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8264 };
8265 
8266 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
8267 {
8268     /* Read the high 32 bits of the current CCSIDR */
8269     return extract64(ccsidr_read(env, ri), 32, 32);
8270 }
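     
     /*
      * e.g. if ccsidr_read() returned 0x0123456789abcdef, CCSIDR2 would
      * read as 0x01234567: extract64(x, 32, 32) takes bits [63:32].
      */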
8271 
8272 static const ARMCPRegInfo ccsidr2_reginfo[] = {
8273     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
8274       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
8275       .access = PL1_R,
8276       .accessfn = access_tid4,
8277       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
8278 };
8279 
8280 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8281                                        bool isread)
8282 {
8283     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
8284         return CP_ACCESS_TRAP_EL2;
8285     }
8286 
8287     return CP_ACCESS_OK;
8288 }
8289 
8290 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8291                                        bool isread)
8292 {
8293     if (arm_feature(env, ARM_FEATURE_V8)) {
8294         return access_aa64_tid3(env, ri, isread);
8295     }
8296 
8297     return CP_ACCESS_OK;
8298 }
8299 
8300 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
8301                                      bool isread)
8302 {
8303     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
8304         return CP_ACCESS_TRAP_EL2;
8305     }
8306 
8307     return CP_ACCESS_OK;
8308 }
8309 
8310 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
8311                                         const ARMCPRegInfo *ri, bool isread)
8312 {
8313     /*
8314      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
8315      * in v7A, not in v8A.
8316      */
8317     if (!arm_feature(env, ARM_FEATURE_V8) &&
8318         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
8319         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
8320         return CP_ACCESS_TRAP_EL2;
8321     }
8322     return CP_ACCESS_OK;
8323 }
8324 
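     /*
      * These provide the architected "trivial" Jazelle implementation:
      * all three registers read as zero and ignore writes, and BXJ
      * behaves as a plain BX.
      */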
8325 static const ARMCPRegInfo jazelle_regs[] = {
8326     { .name = "JIDR",
8327       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
8328       .access = PL1_R, .accessfn = access_jazelle,
8329       .type = ARM_CP_CONST, .resetvalue = 0 },
8330     { .name = "JOSCR",
8331       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
8332       .accessfn = access_joscr_jmcr,
8333       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8334     { .name = "JMCR",
8335       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
8336       .accessfn = access_joscr_jmcr,
8337       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8338 };
8339 
8340 static const ARMCPRegInfo contextidr_el2 = {
8341     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
8342     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
8343     .access = PL2_RW,
8344     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
8345 };
8346 
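     /*
      * Registers added by FEAT_VHE. The FOO_EL02 entries are ARM_CP_ALIAS
      * views of the EL1 timer registers, reachable from EL2 under their
      * *_EL02 names only when HCR_EL2.E2H is set (enforced by e2h_access).
      */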
8347 static const ARMCPRegInfo vhe_reginfo[] = {
8348     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
8349       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
8350       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
8351       .raw_writefn = raw_write,
8352       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
8353 #ifndef CONFIG_USER_ONLY
8354     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
8355       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
8356       .fieldoffset =
8357         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
8358       .type = ARM_CP_IO, .access = PL2_RW,
8359       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
8360     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
8361       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
8362       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
8363       .resetfn = gt_hv_timer_reset,
8364       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
8365     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
8366       .type = ARM_CP_IO,
8367       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
8368       .access = PL2_RW,
8369       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
8370       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
8371     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
8372       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
8373       .type = ARM_CP_IO | ARM_CP_ALIAS,
8374       .access = PL2_RW, .accessfn = e2h_access,
8375       .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
8376       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
8377       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
8378     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
8379       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
8380       .type = ARM_CP_IO | ARM_CP_ALIAS,
8381       .access = PL2_RW, .accessfn = e2h_access,
8382       .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
8383       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
8384       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
8385     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8386       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
8387       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8388       .access = PL2_RW, .accessfn = e2h_access,
8389       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
8390     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8391       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
8392       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8393       .access = PL2_RW, .accessfn = e2h_access,
8394       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
8395     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8396       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
8397       .type = ARM_CP_IO | ARM_CP_ALIAS,
8398       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
8399       .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
8400       .access = PL2_RW, .accessfn = e2h_access,
8401       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
8402     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8403       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
8404       .type = ARM_CP_IO | ARM_CP_ALIAS,
8405       .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
8406       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
8407       .access = PL2_RW, .accessfn = e2h_access,
8408       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
8409 #endif
8410 };
8411 
8412 #ifndef CONFIG_USER_ONLY
8413 static const ARMCPRegInfo ats1e1_reginfo[] = {
8414     { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
8415       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8416       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8417       .fgt = FGT_ATS1E1RP,
8418       .accessfn = at_s1e01_access, .writefn = ats_write64 },
8419     { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
8420       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8421       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8422       .fgt = FGT_ATS1E1WP,
8423       .accessfn = at_s1e01_access, .writefn = ats_write64 },
8424 };
8425 
8426 static const ARMCPRegInfo ats1cp_reginfo[] = {
8427     { .name = "ATS1CPRP",
8428       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8429       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8430       .writefn = ats_write },
8431     { .name = "ATS1CPWP",
8432       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8433       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8434       .writefn = ats_write },
8435 };
8436 #endif
8437 
8438 /*
8439  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
8440  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
8441  * is non-zero: never for ARMv7, optional in ARMv8, and
8442  * mandatory for ARMv8.2 and later.
8443  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
8444  * implementation is RAZ/WI we can ignore this detail, as we
8445  * do for ACTLR.
8446  */
8447 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
8448     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
8449       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
8450       .access = PL1_RW, .accessfn = access_tacr,
8451       .type = ARM_CP_CONST, .resetvalue = 0 },
8452     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
8453       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
8454       .access = PL2_RW, .type = ARM_CP_CONST,
8455       .resetvalue = 0 },
8456 };
8457 
8458 void register_cp_regs_for_features(ARMCPU *cpu)
8459 {
8460     /* Register all the coprocessor registers based on feature bits */
8461     CPUARMState *env = &cpu->env;
8462     if (arm_feature(env, ARM_FEATURE_M)) {
8463         /* M profile has no coprocessor registers */
8464         return;
8465     }
8466 
8467     define_arm_cp_regs(cpu, cp_reginfo);
8468     if (!arm_feature(env, ARM_FEATURE_V8)) {
8469         /*
8470          * Must go early as it is full of wildcards that may be
8471          * overridden by later definitions.
8472          */
8473         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
8474     }
8475 
8476     if (arm_feature(env, ARM_FEATURE_V6)) {
8477         /* The ID registers all have impdef reset values */
8478         ARMCPRegInfo v6_idregs[] = {
8479             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
8480               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
8481               .access = PL1_R, .type = ARM_CP_CONST,
8482               .accessfn = access_aa32_tid3,
8483               .resetvalue = cpu->isar.id_pfr0 },
8484             /*
8485              * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
8486              * the value of the GIC field until after we define these regs.
8487              */
8488             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
8489               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
8490               .access = PL1_R,
8491               .accessfn = access_aa32_tid3,
8492 #ifdef CONFIG_USER_ONLY
8493               .type = ARM_CP_CONST,
8494               .resetvalue = cpu->isar.id_pfr1,
8495 #else
8496               .type = ARM_CP_NO_RAW,
8498               .readfn = id_pfr1_read,
8499               .writefn = arm_cp_write_ignore
8500 #endif
8501             },
8502             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
8503               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
8504               .access = PL1_R, .type = ARM_CP_CONST,
8505               .accessfn = access_aa32_tid3,
8506               .resetvalue = cpu->isar.id_dfr0 },
8507             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
8508               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
8509               .access = PL1_R, .type = ARM_CP_CONST,
8510               .accessfn = access_aa32_tid3,
8511               .resetvalue = cpu->id_afr0 },
8512             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
8513               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
8514               .access = PL1_R, .type = ARM_CP_CONST,
8515               .accessfn = access_aa32_tid3,
8516               .resetvalue = cpu->isar.id_mmfr0 },
8517             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
8518               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
8519               .access = PL1_R, .type = ARM_CP_CONST,
8520               .accessfn = access_aa32_tid3,
8521               .resetvalue = cpu->isar.id_mmfr1 },
8522             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
8523               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
8524               .access = PL1_R, .type = ARM_CP_CONST,
8525               .accessfn = access_aa32_tid3,
8526               .resetvalue = cpu->isar.id_mmfr2 },
8527             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
8528               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
8529               .access = PL1_R, .type = ARM_CP_CONST,
8530               .accessfn = access_aa32_tid3,
8531               .resetvalue = cpu->isar.id_mmfr3 },
8532             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
8533               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
8534               .access = PL1_R, .type = ARM_CP_CONST,
8535               .accessfn = access_aa32_tid3,
8536               .resetvalue = cpu->isar.id_isar0 },
8537             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
8538               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
8539               .access = PL1_R, .type = ARM_CP_CONST,
8540               .accessfn = access_aa32_tid3,
8541               .resetvalue = cpu->isar.id_isar1 },
8542             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
8543               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
8544               .access = PL1_R, .type = ARM_CP_CONST,
8545               .accessfn = access_aa32_tid3,
8546               .resetvalue = cpu->isar.id_isar2 },
8547             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
8548               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
8549               .access = PL1_R, .type = ARM_CP_CONST,
8550               .accessfn = access_aa32_tid3,
8551               .resetvalue = cpu->isar.id_isar3 },
8552             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
8553               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
8554               .access = PL1_R, .type = ARM_CP_CONST,
8555               .accessfn = access_aa32_tid3,
8556               .resetvalue = cpu->isar.id_isar4 },
8557             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
8558               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
8559               .access = PL1_R, .type = ARM_CP_CONST,
8560               .accessfn = access_aa32_tid3,
8561               .resetvalue = cpu->isar.id_isar5 },
8562             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
8563               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
8564               .access = PL1_R, .type = ARM_CP_CONST,
8565               .accessfn = access_aa32_tid3,
8566               .resetvalue = cpu->isar.id_mmfr4 },
8567             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
8568               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
8569               .access = PL1_R, .type = ARM_CP_CONST,
8570               .accessfn = access_aa32_tid3,
8571               .resetvalue = cpu->isar.id_isar6 },
8572         };
8573         define_arm_cp_regs(cpu, v6_idregs);
8574         define_arm_cp_regs(cpu, v6_cp_reginfo);
8575     } else {
8576         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
8577     }
8578     if (arm_feature(env, ARM_FEATURE_V6K)) {
8579         define_arm_cp_regs(cpu, v6k_cp_reginfo);
8580     }
8581     if (arm_feature(env, ARM_FEATURE_V7MP) &&
8582         !arm_feature(env, ARM_FEATURE_PMSA)) {
8583         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
8584     }
8585     if (arm_feature(env, ARM_FEATURE_V7VE)) {
8586         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
8587     }
8588     if (arm_feature(env, ARM_FEATURE_V7)) {
8589         ARMCPRegInfo clidr = {
8590             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
8591             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
8592             .access = PL1_R, .type = ARM_CP_CONST,
8593             .accessfn = access_tid4,
8594             .fgt = FGT_CLIDR_EL1,
8595             .resetvalue = cpu->clidr
8596         };
8597         define_one_arm_cp_reg(cpu, &clidr);
8598         define_arm_cp_regs(cpu, v7_cp_reginfo);
8599         define_debug_regs(cpu);
8600         define_pmu_regs(cpu);
8601     } else {
8602         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
8603     }
8604     if (arm_feature(env, ARM_FEATURE_V8)) {
8605         /*
8606          * v8 ID registers, which all have impdef reset values.
8607          * Note that within the ID register ranges the unused slots
8608          * must all be RAZ, not UNDEF; future architecture versions may
8609          * define new registers here.
8610          * ID registers which are AArch64 views of the AArch32 ID registers
8611          * which already existed in v6 and v7 are handled elsewhere,
8612          * in v6_idregs[].
8613          */
8614         int i;
8615         ARMCPRegInfo v8_idregs[] = {
8616             /*
8617              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
8618              * emulation because we don't know the right value for the
8619              * GIC field until after we define these regs.
8620              */
8621             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
8622               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
8623               .access = PL1_R,
8624 #ifdef CONFIG_USER_ONLY
8625               .type = ARM_CP_CONST,
8626               .resetvalue = cpu->isar.id_aa64pfr0
8627 #else
8628               .type = ARM_CP_NO_RAW,
8629               .accessfn = access_aa64_tid3,
8630               .readfn = id_aa64pfr0_read,
8631               .writefn = arm_cp_write_ignore
8632 #endif
8633             },
8634             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
8635               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
8636               .access = PL1_R, .type = ARM_CP_CONST,
8637               .accessfn = access_aa64_tid3,
8638               .resetvalue = cpu->isar.id_aa64pfr1 },
8639             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8640               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
8641               .access = PL1_R, .type = ARM_CP_CONST,
8642               .accessfn = access_aa64_tid3,
8643               .resetvalue = 0 },
8644             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8645               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
8646               .access = PL1_R, .type = ARM_CP_CONST,
8647               .accessfn = access_aa64_tid3,
8648               .resetvalue = 0 },
8649             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
8650               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
8651               .access = PL1_R, .type = ARM_CP_CONST,
8652               .accessfn = access_aa64_tid3,
8653               .resetvalue = cpu->isar.id_aa64zfr0 },
8654             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
8655               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
8656               .access = PL1_R, .type = ARM_CP_CONST,
8657               .accessfn = access_aa64_tid3,
8658               .resetvalue = cpu->isar.id_aa64smfr0 },
8659             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8660               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
8661               .access = PL1_R, .type = ARM_CP_CONST,
8662               .accessfn = access_aa64_tid3,
8663               .resetvalue = 0 },
8664             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8665               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
8666               .access = PL1_R, .type = ARM_CP_CONST,
8667               .accessfn = access_aa64_tid3,
8668               .resetvalue = 0 },
8669             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
8670               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
8671               .access = PL1_R, .type = ARM_CP_CONST,
8672               .accessfn = access_aa64_tid3,
8673               .resetvalue = cpu->isar.id_aa64dfr0 },
8674             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
8675               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
8676               .access = PL1_R, .type = ARM_CP_CONST,
8677               .accessfn = access_aa64_tid3,
8678               .resetvalue = cpu->isar.id_aa64dfr1 },
8679             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8680               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
8681               .access = PL1_R, .type = ARM_CP_CONST,
8682               .accessfn = access_aa64_tid3,
8683               .resetvalue = 0 },
8684             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8685               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
8686               .access = PL1_R, .type = ARM_CP_CONST,
8687               .accessfn = access_aa64_tid3,
8688               .resetvalue = 0 },
8689             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
8690               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
8691               .access = PL1_R, .type = ARM_CP_CONST,
8692               .accessfn = access_aa64_tid3,
8693               .resetvalue = cpu->id_aa64afr0 },
8694             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
8695               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
8696               .access = PL1_R, .type = ARM_CP_CONST,
8697               .accessfn = access_aa64_tid3,
8698               .resetvalue = cpu->id_aa64afr1 },
8699             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8700               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
8701               .access = PL1_R, .type = ARM_CP_CONST,
8702               .accessfn = access_aa64_tid3,
8703               .resetvalue = 0 },
8704             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8705               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
8706               .access = PL1_R, .type = ARM_CP_CONST,
8707               .accessfn = access_aa64_tid3,
8708               .resetvalue = 0 },
8709             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
8710               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
8711               .access = PL1_R, .type = ARM_CP_CONST,
8712               .accessfn = access_aa64_tid3,
8713               .resetvalue = cpu->isar.id_aa64isar0 },
8714             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
8715               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
8716               .access = PL1_R, .type = ARM_CP_CONST,
8717               .accessfn = access_aa64_tid3,
8718               .resetvalue = cpu->isar.id_aa64isar1 },
8719             { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
8720               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
8721               .access = PL1_R, .type = ARM_CP_CONST,
8722               .accessfn = access_aa64_tid3,
8723               .resetvalue = cpu->isar.id_aa64isar2 },
8724             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8725               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
8726               .access = PL1_R, .type = ARM_CP_CONST,
8727               .accessfn = access_aa64_tid3,
8728               .resetvalue = 0 },
8729             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8730               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
8731               .access = PL1_R, .type = ARM_CP_CONST,
8732               .accessfn = access_aa64_tid3,
8733               .resetvalue = 0 },
8734             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8735               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
8736               .access = PL1_R, .type = ARM_CP_CONST,
8737               .accessfn = access_aa64_tid3,
8738               .resetvalue = 0 },
8739             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8740               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
8741               .access = PL1_R, .type = ARM_CP_CONST,
8742               .accessfn = access_aa64_tid3,
8743               .resetvalue = 0 },
8744             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8745               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
8746               .access = PL1_R, .type = ARM_CP_CONST,
8747               .accessfn = access_aa64_tid3,
8748               .resetvalue = 0 },
8749             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
8750               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
8751               .access = PL1_R, .type = ARM_CP_CONST,
8752               .accessfn = access_aa64_tid3,
8753               .resetvalue = cpu->isar.id_aa64mmfr0 },
8754             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
8755               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
8756               .access = PL1_R, .type = ARM_CP_CONST,
8757               .accessfn = access_aa64_tid3,
8758               .resetvalue = cpu->isar.id_aa64mmfr1 },
8759             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
8760               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
8761               .access = PL1_R, .type = ARM_CP_CONST,
8762               .accessfn = access_aa64_tid3,
8763               .resetvalue = cpu->isar.id_aa64mmfr2 },
8764             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8765               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
8766               .access = PL1_R, .type = ARM_CP_CONST,
8767               .accessfn = access_aa64_tid3,
8768               .resetvalue = 0 },
8769             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8770               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
8771               .access = PL1_R, .type = ARM_CP_CONST,
8772               .accessfn = access_aa64_tid3,
8773               .resetvalue = 0 },
8774             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8775               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
8776               .access = PL1_R, .type = ARM_CP_CONST,
8777               .accessfn = access_aa64_tid3,
8778               .resetvalue = 0 },
8779             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8780               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
8781               .access = PL1_R, .type = ARM_CP_CONST,
8782               .accessfn = access_aa64_tid3,
8783               .resetvalue = 0 },
8784             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8785               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
8786               .access = PL1_R, .type = ARM_CP_CONST,
8787               .accessfn = access_aa64_tid3,
8788               .resetvalue = 0 },
8789             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
8790               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8791               .access = PL1_R, .type = ARM_CP_CONST,
8792               .accessfn = access_aa64_tid3,
8793               .resetvalue = cpu->isar.mvfr0 },
8794             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
8795               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8796               .access = PL1_R, .type = ARM_CP_CONST,
8797               .accessfn = access_aa64_tid3,
8798               .resetvalue = cpu->isar.mvfr1 },
8799             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
8800               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8801               .access = PL1_R, .type = ARM_CP_CONST,
8802               .accessfn = access_aa64_tid3,
8803               .resetvalue = cpu->isar.mvfr2 },
8804             /*
8805              * "0, c0, c3, {0,1,2}" are the encodings corresponding to
8806              * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
8807              * as RAZ, since it is in the "reserved for future ID
8808              * registers, RAZ" part of the AArch32 encoding space.
8809              */
8810             { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
8811               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8812               .access = PL1_R, .type = ARM_CP_CONST,
8813               .accessfn = access_aa64_tid3,
8814               .resetvalue = 0 },
8815             { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
8816               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8817               .access = PL1_R, .type = ARM_CP_CONST,
8818               .accessfn = access_aa64_tid3,
8819               .resetvalue = 0 },
8820             { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
8821               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8822               .access = PL1_R, .type = ARM_CP_CONST,
8823               .accessfn = access_aa64_tid3,
8824               .resetvalue = 0 },
8825             /*
8826              * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
8827              * they're also RAZ for AArch64, and in v8 are gradually
8828              * being filled with AArch64-view-of-AArch32-ID-register
8829              * for new ID registers.
8830              */
8831             { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
8832               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
8833               .access = PL1_R, .type = ARM_CP_CONST,
8834               .accessfn = access_aa64_tid3,
8835               .resetvalue = 0 },
8836             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
8837               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
8838               .access = PL1_R, .type = ARM_CP_CONST,
8839               .accessfn = access_aa64_tid3,
8840               .resetvalue = cpu->isar.id_pfr2 },
8841             { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
8842               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
8843               .access = PL1_R, .type = ARM_CP_CONST,
8844               .accessfn = access_aa64_tid3,
8845               .resetvalue = cpu->isar.id_dfr1 },
8846             { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
8847               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
8848               .access = PL1_R, .type = ARM_CP_CONST,
8849               .accessfn = access_aa64_tid3,
8850               .resetvalue = cpu->isar.id_mmfr5 },
8851             { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
8852               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
8853               .access = PL1_R, .type = ARM_CP_CONST,
8854               .accessfn = access_aa64_tid3,
8855               .resetvalue = 0 },
8856             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
8857               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
8858               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
8859               .fgt = FGT_PMCEIDN_EL0,
8860               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
8861             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
8862               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
8863               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
8864               .fgt = FGT_PMCEIDN_EL0,
8865               .resetvalue = cpu->pmceid0 },
8866             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
8867               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
8868               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
8869               .fgt = FGT_PMCEIDN_EL0,
8870               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
8871             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
8872               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
8873               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
8874               .fgt = FGT_PMCEIDN_EL0,
8875               .resetvalue = cpu->pmceid1 },
8876         };
8877 #ifdef CONFIG_USER_ONLY
8878         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
8879             { .name = "ID_AA64PFR0_EL1",
8880               .exported_bits = R_ID_AA64PFR0_FP_MASK |
8881                                R_ID_AA64PFR0_ADVSIMD_MASK |
8882                                R_ID_AA64PFR0_SVE_MASK |
8883                                R_ID_AA64PFR0_DIT_MASK,
8884               .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
8885                             (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
8886             { .name = "ID_AA64PFR1_EL1",
8887               .exported_bits = R_ID_AA64PFR1_BT_MASK |
8888                                R_ID_AA64PFR1_SSBS_MASK |
8889                                R_ID_AA64PFR1_MTE_MASK |
8890                                R_ID_AA64PFR1_SME_MASK },
8891             { .name = "ID_AA64PFR*_EL1_RESERVED",
8892               .is_glob = true },
8893             { .name = "ID_AA64ZFR0_EL1",
8894               .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
8895                                R_ID_AA64ZFR0_AES_MASK |
8896                                R_ID_AA64ZFR0_BITPERM_MASK |
8897                                R_ID_AA64ZFR0_BFLOAT16_MASK |
8898                                R_ID_AA64ZFR0_SHA3_MASK |
8899                                R_ID_AA64ZFR0_SM4_MASK |
8900                                R_ID_AA64ZFR0_I8MM_MASK |
8901                                R_ID_AA64ZFR0_F32MM_MASK |
8902                                R_ID_AA64ZFR0_F64MM_MASK },
8903             { .name = "ID_AA64SMFR0_EL1",
8904               .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
8905                                R_ID_AA64SMFR0_BI32I32_MASK |
8906                                R_ID_AA64SMFR0_B16F32_MASK |
8907                                R_ID_AA64SMFR0_F16F32_MASK |
8908                                R_ID_AA64SMFR0_I8I32_MASK |
8909                                R_ID_AA64SMFR0_F16F16_MASK |
8910                                R_ID_AA64SMFR0_B16B16_MASK |
8911                                R_ID_AA64SMFR0_I16I32_MASK |
8912                                R_ID_AA64SMFR0_F64F64_MASK |
8913                                R_ID_AA64SMFR0_I16I64_MASK |
8914                                R_ID_AA64SMFR0_SMEVER_MASK |
8915                                R_ID_AA64SMFR0_FA64_MASK },
8916             { .name = "ID_AA64MMFR0_EL1",
8917               .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
8918               .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
8919                             (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
8920             { .name = "ID_AA64MMFR1_EL1",
8921               .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
8922             { .name = "ID_AA64MMFR2_EL1",
8923               .exported_bits = R_ID_AA64MMFR2_AT_MASK },
8924             { .name = "ID_AA64MMFR*_EL1_RESERVED",
8925               .is_glob = true },
8926             { .name = "ID_AA64DFR0_EL1",
8927               .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
8928             { .name = "ID_AA64DFR1_EL1" },
8929             { .name = "ID_AA64DFR*_EL1_RESERVED",
8930               .is_glob = true },
8931             { .name = "ID_AA64AFR*",
8932               .is_glob = true },
8933             { .name = "ID_AA64ISAR0_EL1",
8934               .exported_bits = R_ID_AA64ISAR0_AES_MASK |
8935                                R_ID_AA64ISAR0_SHA1_MASK |
8936                                R_ID_AA64ISAR0_SHA2_MASK |
8937                                R_ID_AA64ISAR0_CRC32_MASK |
8938                                R_ID_AA64ISAR0_ATOMIC_MASK |
8939                                R_ID_AA64ISAR0_RDM_MASK |
8940                                R_ID_AA64ISAR0_SHA3_MASK |
8941                                R_ID_AA64ISAR0_SM3_MASK |
8942                                R_ID_AA64ISAR0_SM4_MASK |
8943                                R_ID_AA64ISAR0_DP_MASK |
8944                                R_ID_AA64ISAR0_FHM_MASK |
8945                                R_ID_AA64ISAR0_TS_MASK |
8946                                R_ID_AA64ISAR0_RNDR_MASK },
8947             { .name = "ID_AA64ISAR1_EL1",
8948               .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
8949                                R_ID_AA64ISAR1_APA_MASK |
8950                                R_ID_AA64ISAR1_API_MASK |
8951                                R_ID_AA64ISAR1_JSCVT_MASK |
8952                                R_ID_AA64ISAR1_FCMA_MASK |
8953                                R_ID_AA64ISAR1_LRCPC_MASK |
8954                                R_ID_AA64ISAR1_GPA_MASK |
8955                                R_ID_AA64ISAR1_GPI_MASK |
8956                                R_ID_AA64ISAR1_FRINTTS_MASK |
8957                                R_ID_AA64ISAR1_SB_MASK |
8958                                R_ID_AA64ISAR1_BF16_MASK |
8959                                R_ID_AA64ISAR1_DGH_MASK |
8960                                R_ID_AA64ISAR1_I8MM_MASK },
8961             { .name = "ID_AA64ISAR2_EL1",
8962               .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
8963                                R_ID_AA64ISAR2_RPRES_MASK |
8964                                R_ID_AA64ISAR2_GPA3_MASK |
8965                                R_ID_AA64ISAR2_APA3_MASK |
8966                                R_ID_AA64ISAR2_MOPS_MASK |
8967                                R_ID_AA64ISAR2_BC_MASK |
8968                                R_ID_AA64ISAR2_RPRFM_MASK |
8969                                R_ID_AA64ISAR2_CSSC_MASK },
8970             { .name = "ID_AA64ISAR*_EL1_RESERVED",
8971               .is_glob = true },
8972         };
8973         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
8974 #endif
8975         /*
8976          * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
8977          * TODO: For RMR, a write with bit 1 set should do something with
8978          * cpu_reset(). In the meantime, "the bit is strictly a request",
8979          * so simply ignoring writes keeps us within the spec.
8980          */
8981         if (!arm_feature(env, ARM_FEATURE_EL3) &&
8982             !arm_feature(env, ARM_FEATURE_EL2)) {
8983             ARMCPRegInfo el1_reset_regs[] = {
8984                 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
8985                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
8986                   .access = PL1_R,
8987                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
8988                 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
8989                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
8990                   .access = PL1_RW, .type = ARM_CP_CONST,
8991                   .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
8992             };
8993             define_arm_cp_regs(cpu, el1_reset_regs);
8994         }
8995         define_arm_cp_regs(cpu, v8_idregs);
8996         define_arm_cp_regs(cpu, v8_cp_reginfo);
8997         if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
8998             define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
8999         }
9000 
9001         for (i = 4; i < 16; i++) {
9002             /*
9003              * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
9004              * For pre-v8 cores there are RAZ patterns for these in
9005              * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
9006              * v8 extends the "must RAZ" part of the ID register space
9007              * to also cover c0, 0, c{8-15}, {0-7}.
9008              * These are STATE_AA32 because in the AArch64 sysreg space
9009              * c4-c7 is where the AArch64 ID registers live (and we've
9010              * already defined those in v8_idregs[]), and c8-c15 are not
9011              * "must RAZ" for AArch64.
9012              */
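                 /*
                  * e.g. i == 10 defines "RES_0_C0_C10_X", covering the
                  * eight AArch32 encodings 0, c0, c10, {0-7} via
                  * .opc2 = CP_ANY below.
                  */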
9013             g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
9014             ARMCPRegInfo v8_aa32_raz_idregs = {
9015                 .name = name,
9016                 .state = ARM_CP_STATE_AA32,
9017                 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
9018                 .access = PL1_R, .type = ARM_CP_CONST,
9019                 .accessfn = access_aa64_tid3,
9020                 .resetvalue = 0 };
9021             define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
9022         }
9023     }
9024 
9025     /*
9026      * Register the base EL2 cpregs.
9027      * Pre-v8, these registers are implemented only as part of the
9028      * Virtualization Extensions (EL2 present).  Beginning with v8,
9029      * if EL2 is missing but EL3 is enabled, most of these become
9030      * RES0 from EL3, with some specific exceptions.
9031      */
9032     if (arm_feature(env, ARM_FEATURE_EL2)
9033         || (arm_feature(env, ARM_FEATURE_EL3)
9034             && arm_feature(env, ARM_FEATURE_V8))) {
9035         uint64_t vmpidr_def = mpidr_read_val(env);
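              /*
               * Out of reset a guest at EL1 therefore sees the real CPU:
               * VPIDR_EL2 resets to MIDR and VMPIDR_EL2 to MPIDR, until a
               * hypervisor installs virtual IDs of its own.
               */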
9036         ARMCPRegInfo vpidr_regs[] = {
9037             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
9038               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9039               .access = PL2_RW, .accessfn = access_el3_aa32ns,
9040               .resetvalue = cpu->midr,
9041               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9042               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
9043             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
9044               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9045               .access = PL2_RW, .resetvalue = cpu->midr,
9046               .type = ARM_CP_EL3_NO_EL2_C_NZ,
9047               .nv2_redirect_offset = 0x88,
9048               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
9049             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
9050               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9051               .access = PL2_RW, .accessfn = access_el3_aa32ns,
9052               .resetvalue = vmpidr_def,
9053               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9054               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
9055             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
9056               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9057               .access = PL2_RW, .resetvalue = vmpidr_def,
9058               .type = ARM_CP_EL3_NO_EL2_C_NZ,
9059               .nv2_redirect_offset = 0x50,
9060               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
9061         };
9062         /*
9063          * The only field of MDCR_EL2 that has a defined architectural reset
9064          * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
9065          */
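              /* e.g. with PMCR_EL0.N == 4, MDCR_EL2.HPMN (bits [4:0]) resets to 4. */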
9066         ARMCPRegInfo mdcr_el2 = {
9067             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
9068             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
9069             .writefn = mdcr_el2_write,
9070             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
9071             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
9072         };
9073         define_one_arm_cp_reg(cpu, &mdcr_el2);
9074         define_arm_cp_regs(cpu, vpidr_regs);
9075         define_arm_cp_regs(cpu, el2_cp_reginfo);
9076         if (arm_feature(env, ARM_FEATURE_V8)) {
9077             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
9078         }
9079         if (cpu_isar_feature(aa64_sel2, cpu)) {
9080             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
9081         }
9082         /*
9083          * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
9084          * See commentary near RMR_EL1.
9085          */
9086         if (!arm_feature(env, ARM_FEATURE_EL3)) {
9087             static const ARMCPRegInfo el2_reset_regs[] = {
9088                 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
9089                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
9090                   .access = PL2_R,
9091                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9092                 { .name = "RVBAR", .type = ARM_CP_ALIAS,
9093                   .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
9094                   .access = PL2_R,
9095                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9096                 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
9097                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
9098                   .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9099             };
9100             define_arm_cp_regs(cpu, el2_reset_regs);
9101         }
9102     }
9103 
9104     /* Register the base EL3 cpregs. */
9105     if (arm_feature(env, ARM_FEATURE_EL3)) {
9106         define_arm_cp_regs(cpu, el3_cp_reginfo);
9107         ARMCPRegInfo el3_regs[] = {
9108             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
9109               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
9110               .access = PL3_R,
9111               .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
9112             { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
9113               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
9114               .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9115             { .name = "RMR", .state = ARM_CP_STATE_AA32,
9116               .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
9117               .access = PL3_RW, .type = ARM_CP_CONST,
9118               .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
9119             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
9120               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
9121               .access = PL3_RW,
9122               .raw_writefn = raw_write, .writefn = sctlr_write,
9123               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
9124               .resetvalue = cpu->reset_sctlr },
9125         };
9126 
9127         define_arm_cp_regs(cpu, el3_regs);
9128     }
9129     /*
9130      * The behaviour of NSACR is sufficiently various that we don't
9131      * try to describe it in a single reginfo:
9132      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
9133      *     reads as constant 0xc00 from NS EL1 and NS EL2
9134      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
9135      *  if v7 without EL3, register doesn't exist
9136      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
9137      */
9138     if (arm_feature(env, ARM_FEATURE_EL3)) {
9139         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
9140             static const ARMCPRegInfo nsacr = {
9141                 .name = "NSACR", .type = ARM_CP_CONST,
9142                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9143                 .access = PL1_RW, .accessfn = nsacr_access,
9144                 .resetvalue = 0xc00
9145             };
9146             define_one_arm_cp_reg(cpu, &nsacr);
9147         } else {
9148             static const ARMCPRegInfo nsacr = {
9149                 .name = "NSACR",
9150                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9151                 .access = PL3_RW | PL1_R,
9152                 .resetvalue = 0,
9153                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
9154             };
9155             define_one_arm_cp_reg(cpu, &nsacr);
9156         }
9157     } else {
9158         if (arm_feature(env, ARM_FEATURE_V8)) {
9159             static const ARMCPRegInfo nsacr = {
9160                 .name = "NSACR", .type = ARM_CP_CONST,
9161                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9162                 .access = PL1_R,
9163                 .resetvalue = 0xc00
9164             };
9165             define_one_arm_cp_reg(cpu, &nsacr);
9166         }
9167     }
9168 
9169     if (arm_feature(env, ARM_FEATURE_PMSA)) {
9170         if (arm_feature(env, ARM_FEATURE_V6)) {
9171             /* PMSAv6 not implemented */
9172             assert(arm_feature(env, ARM_FEATURE_V7));
9173             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9174             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
9175         } else {
9176             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
9177         }
9178     } else {
9179         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9180         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
9181         /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
9182         if (cpu_isar_feature(aa32_hpd, cpu)) {
9183             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
9184         }
9185     }
9186     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
9187         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
9188     }
9189     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
9190         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
9191     }
9192     if (arm_feature(env, ARM_FEATURE_VAPA)) {
9193         ARMCPRegInfo vapa_cp_reginfo[] = {
9194             { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
9195               .access = PL1_RW, .resetvalue = 0,
9196               .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
9197                                      offsetoflow32(CPUARMState, cp15.par_ns) },
9198               .writefn = par_write },
9199 #ifndef CONFIG_USER_ONLY
9200             /* This underdecoding is safe because the reginfo is NO_RAW. */
9201             { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
9202               .access = PL1_W, .accessfn = ats_access,
9203               .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
9204 #endif
9205         };
9206 
9207         /*
9208          * When LPAE exists this 32-bit PAR register is an alias of the
9209          * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
9210          */
9211         if (arm_feature(env, ARM_FEATURE_LPAE)) {
9212             vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
9213         }
9214         define_arm_cp_regs(cpu, vapa_cp_reginfo);
9215     }
9216     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
9217         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
9218     }
9219     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
9220         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
9221     }
9222     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
9223         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
9224     }
9225     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
9226         define_arm_cp_regs(cpu, omap_cp_reginfo);
9227     }
9228     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
9229         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
9230     }
9231     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9232         define_arm_cp_regs(cpu, xscale_cp_reginfo);
9233     }
9234     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
9235         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
9236     }
9237     if (arm_feature(env, ARM_FEATURE_LPAE)) {
9238         define_arm_cp_regs(cpu, lpae_cp_reginfo);
9239     }
9240     if (cpu_isar_feature(aa32_jazelle, cpu)) {
9241         define_arm_cp_regs(cpu, jazelle_regs);
9242     }
9243     /*
9244      * Slightly awkwardly, the OMAP and StrongARM cores need all of
9245      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
9246      * be read-only (i.e. a write causes an UNDEF exception).
9247      */
9248     {
9249         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
9250             /*
9251              * Pre-v8 MIDR space.
9252              * Note that the MIDR isn't a simple constant register because
9253              * of the TI925 behaviour where writes to another register can
9254              * cause the MIDR value to change.
9255              *
9256              * Unimplemented registers in the c15 0 0 0 space default to
9257              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
9258              * and friends override accordingly.
9259              */
9260             { .name = "MIDR",
9261               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
9262               .access = PL1_R, .resetvalue = cpu->midr,
9263               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
9264               .readfn = midr_read,
9265               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9266               .type = ARM_CP_OVERRIDE },
9267             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
9268             { .name = "DUMMY",
9269               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
9270               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9271             { .name = "DUMMY",
9272               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
9273               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9274             { .name = "DUMMY",
9275               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
9276               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9277             { .name = "DUMMY",
9278               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
9279               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9280             { .name = "DUMMY",
9281               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
9282               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9283         };
9284         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
9285             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
9286               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
9287               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
9288               .fgt = FGT_MIDR_EL1,
9289               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9290               .readfn = midr_read },
9291             /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
9292             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
9293               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
9294               .access = PL1_R, .resetvalue = cpu->midr },
9295             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
9296               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
9297               .access = PL1_R,
9298               .accessfn = access_aa64_tid1,
9299               .fgt = FGT_REVIDR_EL1,
9300               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
9301         };
9302         ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
9303             .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
9304             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9305             .access = PL1_R, .resetvalue = cpu->midr
9306         };
9307         ARMCPRegInfo id_cp_reginfo[] = {
9308             /* These are common to v8 and pre-v8 */
9309             { .name = "CTR",
9310               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
9311               .access = PL1_R, .accessfn = ctr_el0_access,
9312               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9313             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
9314               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
9315               .access = PL0_R, .accessfn = ctr_el0_access,
9316               .fgt = FGT_CTR_EL0,
9317               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9318             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
9319             { .name = "TCMTR",
9320               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
9321               .access = PL1_R,
9322               .accessfn = access_aa32_tid1,
9323               .type = ARM_CP_CONST, .resetvalue = 0 },
9324         };
9325         /* TLBTR is specific to VMSA */
9326         ARMCPRegInfo id_tlbtr_reginfo = {
9327               .name = "TLBTR",
9328               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
9329               .access = PL1_R,
9330               .accessfn = access_aa32_tid1,
9331               .type = ARM_CP_CONST, .resetvalue = 0,
9332         };
9333         /* MPUIR is specific to PMSA V6+ */
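              /* The region count is reported in MPUIR.DREGION, bits [15:8]. */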
9334         ARMCPRegInfo id_mpuir_reginfo = {
9335               .name = "MPUIR",
9336               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9337               .access = PL1_R, .type = ARM_CP_CONST,
9338               .resetvalue = cpu->pmsav7_dregion << 8
9339         };
9340         /* HMPUIR is specific to PMSA V8 */
9341         ARMCPRegInfo id_hmpuir_reginfo = {
9342             .name = "HMPUIR",
9343             .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
9344             .access = PL2_R, .type = ARM_CP_CONST,
9345             .resetvalue = cpu->pmsav8r_hdregion
9346         };
9347         static const ARMCPRegInfo crn0_wi_reginfo = {
9348             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
9349             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
9350             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
9351         };
9352 #ifdef CONFIG_USER_ONLY
9353         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
9354             { .name = "MIDR_EL1",
9355               .exported_bits = R_MIDR_EL1_REVISION_MASK |
9356                                R_MIDR_EL1_PARTNUM_MASK |
9357                                R_MIDR_EL1_ARCHITECTURE_MASK |
9358                                R_MIDR_EL1_VARIANT_MASK |
9359                                R_MIDR_EL1_IMPLEMENTER_MASK },
9360             { .name = "REVIDR_EL1" },
9361         };
9362         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
9363 #endif
9364         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
9365             arm_feature(env, ARM_FEATURE_STRONGARM)) {
9366             size_t i;
9367             /*
9368              * Register the blanket "writes ignored" value first to cover the
9369              * whole space. Then update the specific ID registers to allow write
9370              * access, so that they ignore writes rather than causing them to
9371              * UNDEF.
9372              */
9373             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
9374             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
9375                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
9376             }
9377             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
9378                 id_cp_reginfo[i].access = PL1_RW;
9379             }
9380             id_mpuir_reginfo.access = PL1_RW;
9381             id_tlbtr_reginfo.access = PL1_RW;
9382         }
9383         if (arm_feature(env, ARM_FEATURE_V8)) {
9384             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
9385             if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9386                 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
9387             }
9388         } else {
9389             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
9390         }
9391         define_arm_cp_regs(cpu, id_cp_reginfo);
9392         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9393             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
9394         } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
9395                    arm_feature(env, ARM_FEATURE_V8)) {
9396             uint32_t i = 0;
9397             char *tmp_string;
9398 
9399             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9400             define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
9401             define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
9402 
9403             /* Register aliases are only valid for the first 32 indexes */
9404             for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
9405                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
9406                 uint8_t opc1 = extract32(i, 4, 1);
9407                 uint8_t opc2 = extract32(i, 0, 1) << 2;
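                /*
                 * Worked example (illustrative): for i = 5 (0b101) the
                 * derivation above gives crm = 0b1000 | 0b010 = 10,
                 * opc1 = 0 and opc2 = 1 << 2 = 4, i.e. PRBAR5 encodes as
                 * c6, c10, opc2 = 4; the PRLAR alias below additionally
                 * sets opc2 bit 0, giving opc2 = 5.
                 */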
9408 
9409                 tmp_string = g_strdup_printf("PRBAR%u", i);
9410                 ARMCPRegInfo tmp_prbarn_reginfo = {
9411                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9412                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9413                     .access = PL1_RW, .resetvalue = 0,
9414                     .accessfn = access_tvm_trvm,
9415                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9416                 };
9417                 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
9418                 g_free(tmp_string);
9419 
9420                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
9421                 tmp_string = g_strdup_printf("PRLAR%u", i);
9422                 ARMCPRegInfo tmp_prlarn_reginfo = {
9423                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9424                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9425                     .access = PL1_RW, .resetvalue = 0,
9426                     .accessfn = access_tvm_trvm,
9427                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9428                 };
9429                 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
9430                 g_free(tmp_string);
9431             }
9432 
9433             /* Register aliases are only valid for the first 32 indexes */
9434             for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
9435                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
9436                 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
9437                 uint8_t opc2 = extract32(i, 0, 1) << 2;
9438 
9439                 tmp_string = g_strdup_printf("HPRBAR%u", i);
9440                 ARMCPRegInfo tmp_hprbarn_reginfo = {
9441                     .name = tmp_string,
9442                     .type = ARM_CP_NO_RAW,
9443                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9444                     .access = PL2_RW, .resetvalue = 0,
9445                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9446                 };
9447                 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
9448                 g_free(tmp_string);
9449 
9450                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
9451                 tmp_string = g_strdup_printf("HPRLAR%u", i);
9452                 ARMCPRegInfo tmp_hprlarn_reginfo = {
9453                     .name = tmp_string,
9454                     .type = ARM_CP_NO_RAW,
9455                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9456                     .access = PL2_RW, .resetvalue = 0,
9457                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9458                 };
9459                 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
9460                 g_free(tmp_string);
9461             }
9462         } else if (arm_feature(env, ARM_FEATURE_V7)) {
9463             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9464         }
9465     }
9466 
9467     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
9468         ARMCPRegInfo mpidr_cp_reginfo[] = {
9469             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
9470               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
9471               .fgt = FGT_MPIDR_EL1,
9472               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
9473         };
9474 #ifdef CONFIG_USER_ONLY
9475         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
9476             { .name = "MPIDR_EL1",
9477               .fixed_bits = 0x0000000080000000 },
9478         };
9479         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
9480 #endif
9481         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
9482     }
9483 
9484     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
9485         ARMCPRegInfo auxcr_reginfo[] = {
9486             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
9487               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
9488               .access = PL1_RW, .accessfn = access_tacr,
9489               .nv2_redirect_offset = 0x118,
9490               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
9491             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
9492               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
9493               .access = PL2_RW, .type = ARM_CP_CONST,
9494               .resetvalue = 0 },
9495             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
9496               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
9497               .access = PL3_RW, .type = ARM_CP_CONST,
9498               .resetvalue = 0 },
9499         };
9500         define_arm_cp_regs(cpu, auxcr_reginfo);
9501         if (cpu_isar_feature(aa32_ac2, cpu)) {
9502             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
9503         }
9504     }
9505 
9506     if (arm_feature(env, ARM_FEATURE_CBAR)) {
9507         /*
9508          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
9509          * There are two flavours:
9510          *  (1) older 32-bit only cores have a simple 32-bit CBAR
9511          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
9512          *      32-bit register visible to AArch32 at a different encoding
9513          *      to the "flavour 1" register and with the bits rearranged to
9514          *      be able to squash a 64-bit address into the 32-bit view.
9515          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
9516          * in future if we support AArch32-only configs of some of the
9517          * AArch64 cores we might need to add a specific feature flag
9518          * to indicate cores with "flavour 2" CBAR.
9519          */
9520         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
9521             /* 32 bit view is [31:18] 0...0 [43:32]. */
9522             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
9523                 | extract64(cpu->reset_cbar, 32, 12);
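            /*
             * For example (illustrative value only): a reset_cbar of
             * 0x0000000840000000 yields cbar32 = 0x40000008: bits [31:18]
             * stay in place and bits [43:32] move down to bits [11:0].
             */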
9524             ARMCPRegInfo cbar_reginfo[] = {
9525                 { .name = "CBAR",
9526                   .type = ARM_CP_CONST,
9527                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
9528                   .access = PL1_R, .resetvalue = cbar32 },
9529                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
9530                   .type = ARM_CP_CONST,
9531                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
9532                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
9533             };
9534             /* We don't currently implement a read/write 64-bit CBAR */
9535             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
9536             define_arm_cp_regs(cpu, cbar_reginfo);
9537         } else {
9538             ARMCPRegInfo cbar = {
9539                 .name = "CBAR",
9540                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
9541                 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
9542                 .fieldoffset = offsetof(CPUARMState,
9543                                         cp15.c15_config_base_address)
9544             };
9545             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
9546                 cbar.access = PL1_R;
9547                 cbar.fieldoffset = 0;
9548                 cbar.type = ARM_CP_CONST;
9549             }
9550             define_one_arm_cp_reg(cpu, &cbar);
9551         }
9552     }
9553 
9554     if (arm_feature(env, ARM_FEATURE_VBAR)) {
9555         static const ARMCPRegInfo vbar_cp_reginfo[] = {
9556             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
9557               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
9558               .access = PL1_RW, .writefn = vbar_write,
9559               .accessfn = access_nv1,
9560               .fgt = FGT_VBAR_EL1,
9561               .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
9562               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
9563                                      offsetof(CPUARMState, cp15.vbar_ns) },
9564               .resetvalue = 0 },
9565         };
9566         define_arm_cp_regs(cpu, vbar_cp_reginfo);
9567     }
9568 
9569     /* Generic registers whose values depend on the implementation */
9570     {
9571         ARMCPRegInfo sctlr = {
9572             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
9573             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
9574             .access = PL1_RW, .accessfn = access_tvm_trvm,
9575             .fgt = FGT_SCTLR_EL1,
9576             .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
9577             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
9578                                    offsetof(CPUARMState, cp15.sctlr_ns) },
9579             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
9580             .raw_writefn = raw_write,
9581         };
9582         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9583             /*
9584              * Normally we would always end the TB on an SCTLR write, but Linux
9585              * arch/arm/mach-pxa/sleep.S expects two instructions following
9586              * an MMU enable to execute from cache.  Imitate this behaviour.
9587              */
9588             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
9589         }
9590         define_one_arm_cp_reg(cpu, &sctlr);
9591 
9592         if (arm_feature(env, ARM_FEATURE_PMSA) &&
9593             arm_feature(env, ARM_FEATURE_V8)) {
9594             ARMCPRegInfo vsctlr = {
9595                 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
9596                 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
9597                 .access = PL2_RW, .resetvalue = 0x0,
9598                 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
9599             };
9600             define_one_arm_cp_reg(cpu, &vsctlr);
9601         }
9602     }
9603 
9604     if (cpu_isar_feature(aa64_lor, cpu)) {
9605         define_arm_cp_regs(cpu, lor_reginfo);
9606     }
9607     if (cpu_isar_feature(aa64_pan, cpu)) {
9608         define_one_arm_cp_reg(cpu, &pan_reginfo);
9609     }
9610 #ifndef CONFIG_USER_ONLY
9611     if (cpu_isar_feature(aa64_ats1e1, cpu)) {
9612         define_arm_cp_regs(cpu, ats1e1_reginfo);
9613     }
9614     if (cpu_isar_feature(aa32_ats1e1, cpu)) {
9615         define_arm_cp_regs(cpu, ats1cp_reginfo);
9616     }
9617 #endif
9618     if (cpu_isar_feature(aa64_uao, cpu)) {
9619         define_one_arm_cp_reg(cpu, &uao_reginfo);
9620     }
9621 
9622     if (cpu_isar_feature(aa64_dit, cpu)) {
9623         define_one_arm_cp_reg(cpu, &dit_reginfo);
9624     }
9625     if (cpu_isar_feature(aa64_ssbs, cpu)) {
9626         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
9627     }
9628     if (cpu_isar_feature(any_ras, cpu)) {
9629         define_arm_cp_regs(cpu, minimal_ras_reginfo);
9630     }
9631 
9632     if (cpu_isar_feature(aa64_vh, cpu) ||
9633         cpu_isar_feature(aa64_debugv8p2, cpu)) {
9634         define_one_arm_cp_reg(cpu, &contextidr_el2);
9635     }
9636     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9637         define_arm_cp_regs(cpu, vhe_reginfo);
9638     }
9639 
9640     if (cpu_isar_feature(aa64_sve, cpu)) {
9641         define_arm_cp_regs(cpu, zcr_reginfo);
9642     }
9643 
9644     if (cpu_isar_feature(aa64_hcx, cpu)) {
9645         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
9646     }
9647 
9648 #ifdef TARGET_AARCH64
9649     if (cpu_isar_feature(aa64_sme, cpu)) {
9650         define_arm_cp_regs(cpu, sme_reginfo);
9651     }
9652     if (cpu_isar_feature(aa64_pauth, cpu)) {
9653         define_arm_cp_regs(cpu, pauth_reginfo);
9654     }
9655     if (cpu_isar_feature(aa64_rndr, cpu)) {
9656         define_arm_cp_regs(cpu, rndr_reginfo);
9657     }
9658     if (cpu_isar_feature(aa64_tlbirange, cpu)) {
9659         define_arm_cp_regs(cpu, tlbirange_reginfo);
9660     }
9661     if (cpu_isar_feature(aa64_tlbios, cpu)) {
9662         define_arm_cp_regs(cpu, tlbios_reginfo);
9663     }
9664     /* Data Cache clean instructions up to PoP */
9665     if (cpu_isar_feature(aa64_dcpop, cpu)) {
9666         define_one_arm_cp_reg(cpu, dcpop_reg);
9667 
9668         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
9669             define_one_arm_cp_reg(cpu, dcpodp_reg);
9670         }
9671     }
9672 
9673     /*
9674      * If full MTE is enabled, add all of the system registers.
9675      * If only "instructions available at EL0" are enabled,
9676      * then define only a RAZ/WI version of PSTATE.TCO.
9677      */
9678     if (cpu_isar_feature(aa64_mte, cpu)) {
9679         ARMCPRegInfo gmid_reginfo = {
9680             .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
9681             .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
9682             .access = PL1_R, .accessfn = access_aa64_tid5,
9683             .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
9684         };
9685         define_one_arm_cp_reg(cpu, &gmid_reginfo);
9686         define_arm_cp_regs(cpu, mte_reginfo);
9687         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9688     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
9689         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
9690         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9691     }
9692 
9693     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
9694         define_arm_cp_regs(cpu, scxtnum_reginfo);
9695     }
9696 
9697     if (cpu_isar_feature(aa64_fgt, cpu)) {
9698         define_arm_cp_regs(cpu, fgt_reginfo);
9699     }
9700 
9701     if (cpu_isar_feature(aa64_rme, cpu)) {
9702         define_arm_cp_regs(cpu, rme_reginfo);
9703         if (cpu_isar_feature(aa64_mte, cpu)) {
9704             define_arm_cp_regs(cpu, rme_mte_reginfo);
9705         }
9706     }
9707 
9708     if (cpu_isar_feature(aa64_nv2, cpu)) {
9709         define_arm_cp_regs(cpu, nv2_reginfo);
9710     }
9711 #endif
9712 
9713     if (cpu_isar_feature(any_predinv, cpu)) {
9714         define_arm_cp_regs(cpu, predinv_reginfo);
9715     }
9716 
9717     if (cpu_isar_feature(any_ccidx, cpu)) {
9718         define_arm_cp_regs(cpu, ccsidr2_reginfo);
9719     }
9720 
9721 #ifndef CONFIG_USER_ONLY
9722     /*
9723      * Register redirections and aliases must be done last,
9724      * after the registers from the other extensions have been defined.
9725      */
9726     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9727         define_arm_vh_e2h_redirects_aliases(cpu);
9728     }
9729 #endif
9730 }
9731 
9732 /*
9733  * Private utility function for define_one_arm_cp_reg_with_opaque():
9734  * add a single reginfo struct to the hash table.
9735  */
9736 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
9737                                    void *opaque, CPState state,
9738                                    CPSecureState secstate,
9739                                    int crm, int opc1, int opc2,
9740                                    const char *name)
9741 {
9742     CPUARMState *env = &cpu->env;
9743     uint32_t key;
9744     ARMCPRegInfo *r2;
9745     bool is64 = r->type & ARM_CP_64BIT;
9746     bool ns = secstate & ARM_CP_SECSTATE_NS;
9747     int cp = r->cp;
9748     size_t name_len;
9749     bool make_const;
9750 
9751     switch (state) {
9752     case ARM_CP_STATE_AA32:
9753         /* We assume it is a cp15 register if the .cp field is left unset. */
9754         if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
9755             cp = 15;
9756         }
9757         key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
9758         break;
9759     case ARM_CP_STATE_AA64:
9760         /*
9761          * To allow abbreviation of ARMCPRegInfo definitions, we treat
9762          * cp == 0 as equivalent to the value for "standard guest-visible
9763          * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
9764          * in their AArch64 view (the .cp value may be non-zero for the
9765          * benefit of the AArch32 view).
9766          */
9767         if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
9768             cp = CP_REG_ARM64_SYSREG_CP;
9769         }
9770         key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
9771         break;
9772     default:
9773         g_assert_not_reached();
9774     }
9775 
9776     /* Overriding of an existing definition must be explicitly requested. */
9777     if (!(r->type & ARM_CP_OVERRIDE)) {
9778         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
9779         if (oldreg) {
9780             assert(oldreg->type & ARM_CP_OVERRIDE);
9781         }
9782     }
9783 
9784     /*
9785      * Eliminate registers that are not present because the EL is missing.
9786      * Doing this here makes it easier to put all registers for a given
9787      * feature into the same ARMCPRegInfo array and define them all at once.
9788      */
9789     make_const = false;
9790     if (arm_feature(env, ARM_FEATURE_EL3)) {
9791         /*
9792          * An EL2 register without EL2 but with EL3 is (usually) RES0.
9793          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
9794          */
9795         int min_el = ctz32(r->access) / 2;
9796         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
9797             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
9798                 return;
9799             }
9800             make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
9801         }
9802     } else {
9803         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
9804                                  ? PL2_RW : PL1_RW);
9805         if ((r->access & max_el) == 0) {
9806             return;
9807         }
9808     }
9809 
9810     /* Combine cpreg and name into one allocation. */
9811     name_len = strlen(name) + 1;
9812     r2 = g_malloc(sizeof(*r2) + name_len);
9813     *r2 = *r;
9814     r2->name = memcpy(r2 + 1, name, name_len);
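    /* r2 + 1 points just past the struct, where the name string is stored. */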
9815 
9816     /*
9817      * Update fields to match the instantiation, overwriting wildcards
9818      * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
9819      */
9820     r2->cp = cp;
9821     r2->crm = crm;
9822     r2->opc1 = opc1;
9823     r2->opc2 = opc2;
9824     r2->state = state;
9825     r2->secure = secstate;
9826     if (opaque) {
9827         r2->opaque = opaque;
9828     }
9829 
9830     if (make_const) {
9831         /* This should not have been a very special register to begin with. */
9832         int old_special = r2->type & ARM_CP_SPECIAL_MASK;
9833         assert(old_special == 0 || old_special == ARM_CP_NOP);
9834         /*
9835          * Set the special function to CONST, retaining the other flags.
9836          * This is important for e.g. ARM_CP_SVE so that we still
9837          * take the SVE trap if CPTR_EL3.EZ == 0.
9838          */
9839         r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
9840         /*
9841          * Usually, these registers become RES0, but there are a few
9842          * special cases like VPIDR_EL2 which have a constant non-zero
9843          * value with writes ignored.
9844          */
9845         if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
9846             r2->resetvalue = 0;
9847         }
9848         /*
9849          * ARM_CP_CONST has precedence, so removing the callbacks and
9850          * offsets is not strictly necessary, but it is potentially
9851          * less confusing to debug later.
9852          */
9853         r2->readfn = NULL;
9854         r2->writefn = NULL;
9855         r2->raw_readfn = NULL;
9856         r2->raw_writefn = NULL;
9857         r2->resetfn = NULL;
9858         r2->fieldoffset = 0;
9859         r2->bank_fieldoffsets[0] = 0;
9860         r2->bank_fieldoffsets[1] = 0;
9861     } else {
9862         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
9863 
9864         if (isbanked) {
9865             /*
9866              * Register is banked (using both entries in array).
9867              * Overwrite fieldoffset: bank_fieldoffsets is only used when
9868              * defining banked registers; afterwards only fieldoffset is used.
9869              */
9870             r2->fieldoffset = r->bank_fieldoffsets[ns];
9871         }
9872         if (state == ARM_CP_STATE_AA32) {
9873             if (isbanked) {
9874                 /*
9875                  * If the register is banked then we don't need to migrate or
9876                  * reset the 32-bit instance in certain cases:
9877                  *
9878                  * 1) If the register has both 32-bit and 64-bit instances
9879                  *    then we can count on the 64-bit instance taking care
9880                  *    of the non-secure bank.
9881                  * 2) If ARMv8 is enabled then we can count on a 64-bit
9882                  *    version taking care of the secure bank.  This requires
9883                  *    that separate 32 and 64-bit definitions are provided.
9884                  */
9885                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
9886                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
9887                     r2->type |= ARM_CP_ALIAS;
9888                 }
9889             } else if ((secstate != r->secure) && !ns) {
9890                 /*
9891                  * The register is not banked so we only want to allow
9892                  * migration of the non-secure instance.
9893                  */
9894                 r2->type |= ARM_CP_ALIAS;
9895             }
9896 
9897             if (HOST_BIG_ENDIAN &&
9898                 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
9899                 r2->fieldoffset += sizeof(uint32_t);
9900             }
9901         }
9902     }
9903 
9904     /*
9905      * By convention, for wildcarded registers only the first
9906      * entry is used for migration; the others are marked as
9907      * ALIAS so we don't try to transfer the register
9908      * multiple times. Special registers (ie NOP/WFI) are
9909      * never migratable and not even raw-accessible.
9910      */
9911     if (r2->type & ARM_CP_SPECIAL_MASK) {
9912         r2->type |= ARM_CP_NO_RAW;
9913     }
9914     if (((r->crm == CP_ANY) && crm != 0) ||
9915         ((r->opc1 == CP_ANY) && opc1 != 0) ||
9916         ((r->opc2 == CP_ANY) && opc2 != 0)) {
9917         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
9918     }
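    /*
     * For instance, a definition with .opc2 = CP_ANY is expanded by the
     * caller into eight instances; only the opc2 == 0 instance stays
     * migratable and GDB-visible, the other seven are tagged
     * ALIAS | NO_GDB above.
     */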
9919 
9920     /*
9921      * Check that raw accesses are either forbidden or handled. Note that
9922      * we can't assert this earlier because the setup of fieldoffset for
9923      * banked registers has to be done first.
9924      */
9925     if (!(r2->type & ARM_CP_NO_RAW)) {
9926         assert(!raw_accessors_invalid(r2));
9927     }
9928 
9929     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
9930 }
9931 
9932 
9933 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
9934                                        const ARMCPRegInfo *r, void *opaque)
9935 {
9936     /*
9937      * Define implementations of coprocessor registers.
9938      * We store these in a hashtable because typically
9939      * there are fewer than 150 registers in a space which
9940      * is 16*16*16*8*8 = 262144 in size.
9941      * Wildcarding is supported for the crm, opc1 and opc2 fields.
9942      * If a register is defined twice then the second definition is
9943      * used, so this can be used to define some generic registers and
9944      * then override them with implementation specific variations.
9945      * At least one of the original and the second definition should
9946      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
9947      * against accidental use.
9948      *
9949      * The state field defines whether the register is to be
9950      * visible in the AArch32 or AArch64 execution state. If the
9951      * state is set to ARM_CP_STATE_BOTH then we synthesise a
9952      * reginfo structure for the AArch32 view, which sees the lower
9953      * 32 bits of the 64 bit register.
9954      *
9955      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
9956      * be wildcarded. AArch64 registers are always considered to be 64
9957      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
9958      * the register, if any.
9959      */
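    /*
     * Minimal usage sketch (hypothetical register, for illustration only):
     *
     *     static const ARMCPRegInfo demo = {
     *         .name = "DEMO", .cp = 15, .crn = 9, .crm = CP_ANY,
     *         .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW,
     *         .type = ARM_CP_CONST, .resetvalue = 0,
     *     };
     *     define_one_arm_cp_reg(cpu, &demo);
     *
     * would expand to one hashtable entry per (crm, opc2) combination,
     * i.e. 16 * 8 = 128 AArch32 entries.
     */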
9960     int crm, opc1, opc2;
9961     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
9962     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
9963     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
9964     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
9965     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
9966     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
9967     CPState state;
9968 
9969     /* 64 bit registers have only CRm and Opc1 fields */
9970     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
9971     /* op0 only exists in the AArch64 encodings */
9972     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
9973     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
9974     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
9975     /*
9976      * This API is only for Arm's system coprocessors (14 and 15) or
9977      * (M-profile or v7A-and-earlier only) for implementation defined
9978      * coprocessors in the range 0..7.  Our decode assumes this, since
9979      * 8..13 can be used for other insns including VFP and Neon. See
9980      * valid_cp() in translate.c.  Assert here that we haven't tried
9981      * to use an invalid coprocessor number.
9982      */
9983     switch (r->state) {
9984     case ARM_CP_STATE_BOTH:
9985         /* 0 has a special meaning, but otherwise the same rules as AA32. */
9986         if (r->cp == 0) {
9987             break;
9988         }
9989         /* fall through */
9990     case ARM_CP_STATE_AA32:
9991         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
9992             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
9993             assert(r->cp >= 14 && r->cp <= 15);
9994         } else {
9995             assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
9996         }
9997         break;
9998     case ARM_CP_STATE_AA64:
9999         assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
10000         break;
10001     default:
10002         g_assert_not_reached();
10003     }
10004     /*
10005      * The AArch64 pseudocode CheckSystemAccess() specifies that op1
10006      * encodes a minimum access level for the register. We roll this
10007      * runtime check into our general permission check code, so check
10008      * here that the reginfo's specified permissions are strict enough
10009      * to encompass the generic architectural permission check.
10010      */
10011     if (r->state != ARM_CP_STATE_AA32) {
10012         CPAccessRights mask;
10013         switch (r->opc1) {
10014         case 0:
10015             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
10016             mask = PL0U_R | PL1_RW;
10017             break;
10018         case 1: case 2:
10019             /* min_EL EL1 */
10020             mask = PL1_RW;
10021             break;
10022         case 3:
10023             /* min_EL EL0 */
10024             mask = PL0_RW;
10025             break;
10026         case 4:
10027         case 5:
10028             /* min_EL EL2 */
10029             mask = PL2_RW;
10030             break;
10031         case 6:
10032             /* min_EL EL3 */
10033             mask = PL3_RW;
10034             break;
10035         case 7:
10036             /* min_EL EL1, secure mode only (we don't check the latter) */
10037             mask = PL1_RW;
10038             break;
10039         default:
10040             /* broken reginfo with out-of-range opc1 */
10041             g_assert_not_reached();
10042         }
10043         /* assert our permissions are not too lax (stricter is fine) */
10044         assert((r->access & ~mask) == 0);
10045     }
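    /*
     * For example, an AArch64 definition with .opc1 = 1 (min_EL EL1)
     * declaring .access = PL0_RW would be too lax and trip the assertion
     * above; declaring something stricter, such as PL1_R, is fine.
     */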
10046 
10047     /*
10048      * Check that the register definition has enough info to handle
10049      * reads and writes if they are permitted.
10050      */
10051     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
10052         if (r->access & PL3_R) {
10053             assert((r->fieldoffset ||
10054                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10055                    r->readfn);
10056         }
10057         if (r->access & PL3_W) {
10058             assert((r->fieldoffset ||
10059                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10060                    r->writefn);
10061         }
10062     }
10063 
10064     for (crm = crmmin; crm <= crmmax; crm++) {
10065         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
10066             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
10067                 for (state = ARM_CP_STATE_AA32;
10068                      state <= ARM_CP_STATE_AA64; state++) {
10069                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
10070                         continue;
10071                     }
10072                     if (state == ARM_CP_STATE_AA32) {
10073                         /*
10074                          * Under AArch32 CP registers can be common
10075                          * (same for secure and non-secure world) or banked.
10076                          */
10077                         char *name;
10078 
10079                         switch (r->secure) {
10080                         case ARM_CP_SECSTATE_S:
10081                         case ARM_CP_SECSTATE_NS:
10082                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10083                                                    r->secure, crm, opc1, opc2,
10084                                                    r->name);
10085                             break;
10086                         case ARM_CP_SECSTATE_BOTH:
10087                             name = g_strdup_printf("%s_S", r->name);
10088                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10089                                                    ARM_CP_SECSTATE_S,
10090                                                    crm, opc1, opc2, name);
10091                             g_free(name);
10092                             add_cpreg_to_hashtable(cpu, r, opaque, state,
10093                                                    ARM_CP_SECSTATE_NS,
10094                                                    crm, opc1, opc2, r->name);
10095                             break;
10096                         default:
10097                             g_assert_not_reached();
10098                         }
10099                     } else {
10100                         /*
10101                          * AArch64 registers get mapped to the non-secure
10102                          * instance of the AArch32 view.
10103                          */
10104                         add_cpreg_to_hashtable(cpu, r, opaque, state,
10105                                                ARM_CP_SECSTATE_NS,
10106                                                crm, opc1, opc2, r->name);
10107                     }
10108                 }
10109             }
10110         }
10111     }
10112 }
10113 
10114 /* Define a whole list of registers */
10115 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
10116                                         void *opaque, size_t len)
10117 {
10118     size_t i;
10119     for (i = 0; i < len; ++i) {
10120         define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
10121     }
10122 }
10123 
10124 /*
10125  * Modify ARMCPRegInfo for access from userspace.
10126  *
10127  * This is a data-driven modification directed by
10128  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST, as
10129  * user-space cannot alter any values and dynamic values pertaining to
10130  * execution state are hidden from the user-space view anyway.
10131  */
10132 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
10133                                  const ARMCPRegUserSpaceInfo *mods,
10134                                  size_t mods_len)
10135 {
10136     for (size_t mi = 0; mi < mods_len; ++mi) {
10137         const ARMCPRegUserSpaceInfo *m = mods + mi;
10138         GPatternSpec *pat = NULL;
10139 
10140         if (m->is_glob) {
10141             pat = g_pattern_spec_new(m->name);
10142         }
10143         for (size_t ri = 0; ri < regs_len; ++ri) {
10144             ARMCPRegInfo *r = regs + ri;
10145 
10146             if (pat && g_pattern_match_string(pat, r->name)) {
10147                 r->type = ARM_CP_CONST;
10148                 r->access = PL0U_R;
10149                 r->resetvalue = 0;
10150                 /* continue */
10151             } else if (strcmp(r->name, m->name) == 0) {
10152                 r->type = ARM_CP_CONST;
10153                 r->access = PL0U_R;
10154                 r->resetvalue &= m->exported_bits;
10155                 r->resetvalue |= m->fixed_bits;
10156                 break;
10157             }
10158         }
10159         if (pat) {
10160             g_pattern_spec_free(pat);
10161         }
10162     }
10163 }
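/*
 * Usage sketch (hypothetical names, for illustration): export only the
 * bottom byte of one register and zero out everything matching a glob:
 *
 *     static const ARMCPRegUserSpaceInfo mods[] = {
 *         { .name = "SOMEREG_EL1", .exported_bits = 0xff },
 *         { .name = "ID_*", .is_glob = true },
 *     };
 *     modify_arm_cp_regs(regs, mods);
 *
 * Glob entries make every matching register RAZ and keep scanning;
 * exact-name entries mask with exported_bits, OR in fixed_bits and
 * stop at the first match.
 */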
10164 
10165 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
10166 {
10167     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
10168 }
10169 
10170 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
10171                          uint64_t value)
10172 {
10173     /* Helper coprocessor write function for write-ignore registers */
10174 }
10175 
10176 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
10177 {
10178     /* Helper coprocessor read function for read-as-zero registers */
10179     return 0;
10180 }
10181 
10182 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
10183 {
10184     /* Helper coprocessor reset function for do-nothing-on-reset registers */
10185 }
10186 
10187 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
10188 {
10189     /*
10190      * Return true if it is not valid for us to switch to
10191      * this CPU mode (ie all the UNPREDICTABLE cases in
10192      * the ARM ARM CPSRWriteByInstr pseudocode).
10193      */
10194 
10195     /* Changes to or from Hyp via MSR and CPS are illegal. */
10196     if (write_type == CPSRWriteByInstr &&
10197         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
10198          mode == ARM_CPU_MODE_HYP)) {
10199         return 1;
10200     }
10201 
10202     switch (mode) {
10203     case ARM_CPU_MODE_USR:
10204         return 0;
10205     case ARM_CPU_MODE_SYS:
10206     case ARM_CPU_MODE_SVC:
10207     case ARM_CPU_MODE_ABT:
10208     case ARM_CPU_MODE_UND:
10209     case ARM_CPU_MODE_IRQ:
10210     case ARM_CPU_MODE_FIQ:
10211         /*
10212          * Note that we don't implement the IMPDEF NSACR.RFR which in v7
10213          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
10214          */
10215         /*
10216          * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
10217          * and CPS are treated as illegal mode changes.
10218          */
10219         if (write_type == CPSRWriteByInstr &&
10220             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
10221             (arm_hcr_el2_eff(env) & HCR_TGE)) {
10222             return 1;
10223         }
10224         return 0;
10225     case ARM_CPU_MODE_HYP:
10226         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
10227     case ARM_CPU_MODE_MON:
10228         return arm_current_el(env) < 3;
10229     default:
10230         return 1;
10231     }
10232 }
10233 
10234 uint32_t cpsr_read(CPUARMState *env)
10235 {
10236     int ZF;
10237     ZF = (env->ZF == 0);
10238     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
10239         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
10240         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
10241         | ((env->condexec_bits & 0xfc) << 8)
10242         | (env->GE << 16) | (env->daif & CPSR_AIF);
10243 }
10244 
10245 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
10246                 CPSRWriteType write_type)
10247 {
10248     uint32_t changed_daif;
10249     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
10250         (mask & (CPSR_M | CPSR_E | CPSR_IL));
10251 
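    /*
     * QEMU stores the flags in decomposed form: Z is set iff env->ZF == 0,
     * N is bit 31 of env->NF, C is bit 0 of env->CF and V is bit 31 of
     * env->VF, which explains the shifts below (cpsr_read reassembles
     * them the same way).
     */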
10252     if (mask & CPSR_NZCV) {
10253         env->ZF = (~val) & CPSR_Z;
10254         env->NF = val;
10255         env->CF = (val >> 29) & 1;
10256         env->VF = (val << 3) & 0x80000000;
10257     }
10258     if (mask & CPSR_Q) {
10259         env->QF = ((val & CPSR_Q) != 0);
10260     }
10261     if (mask & CPSR_T) {
10262         env->thumb = ((val & CPSR_T) != 0);
10263     }
10264     if (mask & CPSR_IT_0_1) {
10265         env->condexec_bits &= ~3;
10266         env->condexec_bits |= (val >> 25) & 3;
10267     }
10268     if (mask & CPSR_IT_2_7) {
10269         env->condexec_bits &= 3;
10270         env->condexec_bits |= (val >> 8) & 0xfc;
10271     }
10272     if (mask & CPSR_GE) {
10273         env->GE = (val >> 16) & 0xf;
10274     }
10275 
10276     /*
10277      * In a V7 implementation that includes the security extensions but does
10278      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
10279      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
10280      * bits respectively.
10281      *
10282      * In a V8 implementation, it is permitted for privileged software to
10283      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
10284      */
10285     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
10286         arm_feature(env, ARM_FEATURE_EL3) &&
10287         !arm_feature(env, ARM_FEATURE_EL2) &&
10288         !arm_is_secure(env)) {
10289 
10290         changed_daif = (env->daif ^ val) & mask;
10291 
10292         if (changed_daif & CPSR_A) {
10293             /*
10294              * Check to see if we are allowed to change the masking of async
10295              * abort exceptions from a non-secure state.
10296              */
10297             if (!(env->cp15.scr_el3 & SCR_AW)) {
10298                 qemu_log_mask(LOG_GUEST_ERROR,
10299                               "Ignoring attempt to switch CPSR_A flag from "
10300                               "non-secure world with SCR.AW bit clear\n");
10301                 mask &= ~CPSR_A;
10302             }
10303         }
10304 
10305         if (changed_daif & CPSR_F) {
10306             /*
10307              * Check to see if we are allowed to change the masking of FIQ
10308              * exceptions from a non-secure state.
10309              */
10310             if (!(env->cp15.scr_el3 & SCR_FW)) {
10311                 qemu_log_mask(LOG_GUEST_ERROR,
10312                               "Ignoring attempt to switch CPSR_F flag from "
10313                               "non-secure world with SCR.FW bit clear\n");
10314                 mask &= ~CPSR_F;
10315             }
10316 
10317             /*
10318              * Check whether non-maskable FIQ (NMFI) support is enabled.
10319          * If this bit is set, software is not allowed to mask
10320              * FIQs, but is allowed to set CPSR_F to 0.
10321              */
10322             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
10323                 (val & CPSR_F)) {
10324                 qemu_log_mask(LOG_GUEST_ERROR,
10325                               "Ignoring attempt to enable CPSR_F flag "
10326                               "(non-maskable FIQ [NMFI] support enabled)\n");
10327                 mask &= ~CPSR_F;
10328             }
10329         }
10330     }
10331 
10332     env->daif &= ~(CPSR_AIF & mask);
10333     env->daif |= val & CPSR_AIF & mask;
10334 
10335     if (write_type != CPSRWriteRaw &&
10336         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
10337         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
10338             /*
10339              * Note that we can only get here in USR mode if this is a
10340              * gdb stub write; for this case we follow the architectural
10341              * behaviour for guest writes in USR mode of ignoring an attempt
10342              * to switch mode. (Those are caught by translate.c for writes
10343              * triggered by guest instructions.)
10344              */
10345             mask &= ~CPSR_M;
10346         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
10347             /*
10348              * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
10349              * v7, and has defined behaviour in v8:
10350              *  + leave CPSR.M untouched
10351              *  + allow changes to the other CPSR fields
10352              *  + set PSTATE.IL
10353              * For user changes via the GDB stub, we don't set PSTATE.IL,
10354              * as this would be unnecessarily harsh for a user error.
10355              */
10356             mask &= ~CPSR_M;
10357             if (write_type != CPSRWriteByGDBStub &&
10358                 arm_feature(env, ARM_FEATURE_V8)) {
10359                 mask |= CPSR_IL;
10360                 val |= CPSR_IL;
10361             }
10362             qemu_log_mask(LOG_GUEST_ERROR,
10363                           "Illegal AArch32 mode switch attempt from %s to %s\n",
10364                           aarch32_mode_name(env->uncached_cpsr),
10365                           aarch32_mode_name(val));
10366         } else {
10367             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
10368                           write_type == CPSRWriteExceptionReturn ?
10369                           "Exception return from AArch32" :
10370                           "AArch32 mode switch from",
10371                           aarch32_mode_name(env->uncached_cpsr),
10372                           aarch32_mode_name(val), env->regs[15]);
10373             switch_mode(env, val & CPSR_M);
10374         }
10375     }
10376     mask &= ~CACHED_CPSR_BITS;
10377     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
10378     if (tcg_enabled() && rebuild_hflags) {
10379         arm_rebuild_hflags(env);
10380     }
10381 }
10382 
10383 #ifdef CONFIG_USER_ONLY
10384 
10385 static void switch_mode(CPUARMState *env, int mode)
10386 {
10387     ARMCPU *cpu = env_archcpu(env);
10388 
10389     if (mode != ARM_CPU_MODE_USR) {
10390         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
10391     }
10392 }
10393 
10394 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10395                                  uint32_t cur_el, bool secure)
10396 {
10397     return 1;
10398 }
10399 
10400 void aarch64_sync_64_to_32(CPUARMState *env)
10401 {
10402     g_assert_not_reached();
10403 }
10404 
10405 #else
10406 
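/*
 * Switch between AArch32 CPU modes: spill the outgoing mode's banked
 * r13/r14/SPSR (and r8-r12 when entering or leaving FIQ) and load the
 * incoming mode's copies.
 */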
10407 static void switch_mode(CPUARMState *env, int mode)
10408 {
10409     int old_mode;
10410     int i;
10411 
10412     old_mode = env->uncached_cpsr & CPSR_M;
10413     if (mode == old_mode) {
10414         return;
10415     }
10416 
10417     if (old_mode == ARM_CPU_MODE_FIQ) {
10418         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
10419         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
10420     } else if (mode == ARM_CPU_MODE_FIQ) {
10421         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
10422         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
10423     }
10424 
10425     i = bank_number(old_mode);
10426     env->banked_r13[i] = env->regs[13];
10427     env->banked_spsr[i] = env->spsr;
10428 
10429     i = bank_number(mode);
10430     env->regs[13] = env->banked_r13[i];
10431     env->spsr = env->banked_spsr[i];
10432 
10433     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
10434     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
10435 }
10436 
10437 /*
10438  * Physical Interrupt Target EL Lookup Table
10439  *
10440  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
10441  *
10442  * The below multi-dimensional table is used for looking up the target
10443  * The multi-dimensional table below is used for looking up the target
10444  * target EL is based on SCR and HCR routing controls as well as the
10445  * currently executing EL and secure state.
10446  *
10447  *    Dimensions:
10448  *    target_el_table[2][2][2][2][2][4]
10449  *                    |  |  |  |  |  +--- Current EL
10450  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
10451  *                    |  |  |  +--------- HCR mask override
10452  *                    |  |  +------------ SCR exec state control
10453  *                    |  +--------------- SCR mask override
10454  *                    +------------------ 32-bit(0)/64-bit(1) EL3
10455  *
10456  *    The table values are as such:
10457  *    0-3 = EL0-EL3
10458  *     -1 = Cannot occur
10459  *
10460  * The ARM ARM target EL table includes entries indicating that an "exception
10461  * is not taken".  The two cases where this is applicable are:
10462  *    1) An exception is taken from EL3 but the SCR does not have the exception
10463  *    routed to EL3.
10464  *    2) An exception is taken from EL2 but the HCR does not have the exception
10465  *    routed to EL2.
10466  * In these two cases, the table below contains a target of EL1.  This value is
10467  * returned as it is expected that the consumer of the table data will check
10468  * for "target EL >= current EL" to ensure the exception is not taken.
10469  *
10470  *            SCR     HCR
10471  *         64  EA     AMO                 From
10472  *        BIT IRQ     IMO      Non-secure         Secure
10473  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
10474  */
10475 static const int8_t target_el_table[2][2][2][2][2][4] = {
10476     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10477        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
10478       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10479        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
10480      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10481        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
10482       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10483        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
10484     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
10485        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
10486       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
10487        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
10488      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
10489        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
10490       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
10491        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
10492 };
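/*
 * Worked example (reading the table above): with a 64-bit highest EL
 * (is64 = 1), SCR.IRQ = 0, RW = 1 and HCR.IMO = 1, a physical IRQ taken
 * from non-secure EL0 looks up target_el_table[1][0][1][1][0][0] == 2,
 * i.e. the interrupt is routed to EL2.
 */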
10493 
10494 /*
10495  * Determine the target EL for physical exceptions
10496  */
10497 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10498                                  uint32_t cur_el, bool secure)
10499 {
10500     CPUARMState *env = cpu_env(cs);
10501     bool rw;
10502     bool scr;
10503     bool hcr;
10504     int target_el;
10505     /* Is the highest EL AArch64? */
10506     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
10507     uint64_t hcr_el2;
10508 
10509     if (arm_feature(env, ARM_FEATURE_EL3)) {
10510         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
10511     } else {
10512         /*
10513          * Either EL2 is the highest EL (and so the EL2 register width
10514          * is given by is64); or there is no EL2 or EL3, in which case
10515          * the value of 'rw' does not affect the table lookup anyway.
10516          */
10517         rw = is64;
10518     }
10519 
10520     hcr_el2 = arm_hcr_el2_eff(env);
10521     switch (excp_idx) {
10522     case EXCP_IRQ:
10523         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
10524         hcr = hcr_el2 & HCR_IMO;
10525         break;
10526     case EXCP_FIQ:
10527         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
10528         hcr = hcr_el2 & HCR_FMO;
10529         break;
10530     default:
10531         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
10532         hcr = hcr_el2 & HCR_AMO;
10533         break;
10534     }
10535 
10536     /*
10537      * For these purposes, TGE and AMO/IMO/FMO both force the
10538      * interrupt to EL2.  Fold TGE into the bit extracted above.
10539      */
10540     hcr |= (hcr_el2 & HCR_TGE) != 0;
10541 
10542     /* Perform a table-lookup for the target EL given the current state */
10543     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
10544 
10545     assert(target_el > 0);
10546 
10547     return target_el;
10548 }
10549 
10550 void arm_log_exception(CPUState *cs)
10551 {
10552     int idx = cs->exception_index;
10553 
10554     if (qemu_loglevel_mask(CPU_LOG_INT)) {
10555         const char *exc = NULL;
10556         static const char * const excnames[] = {
10557             [EXCP_UDEF] = "Undefined Instruction",
10558             [EXCP_SWI] = "SVC",
10559             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
10560             [EXCP_DATA_ABORT] = "Data Abort",
10561             [EXCP_IRQ] = "IRQ",
10562             [EXCP_FIQ] = "FIQ",
10563             [EXCP_BKPT] = "Breakpoint",
10564             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
10565             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
10566             [EXCP_HVC] = "Hypervisor Call",
10567             [EXCP_HYP_TRAP] = "Hypervisor Trap",
10568             [EXCP_SMC] = "Secure Monitor Call",
10569             [EXCP_VIRQ] = "Virtual IRQ",
10570             [EXCP_VFIQ] = "Virtual FIQ",
10571             [EXCP_SEMIHOST] = "Semihosting call",
10572             [EXCP_NOCP] = "v7M NOCP UsageFault",
10573             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
10574             [EXCP_STKOF] = "v8M STKOF UsageFault",
10575             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
10576             [EXCP_LSERR] = "v8M LSERR UsageFault",
10577             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
10578             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
10579             [EXCP_VSERR] = "Virtual SERR",
10580             [EXCP_GPC] = "Granule Protection Check",
10581         };
10582 
10583         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
10584             exc = excnames[idx];
10585         }
10586         if (!exc) {
10587             exc = "unknown";
10588         }
10589         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
10590                       idx, exc, cs->cpu_index);
10591     }
10592 }
10593 
10594 /*
10595  * Function used to synchronize QEMU's AArch64 register set with the AArch32
10596  * register set.  This is necessary when switching between AArch32 and AArch64
10597  * execution state.
10598  */
10599 void aarch64_sync_32_to_64(CPUARMState *env)
10600 {
10601     int i;
10602     uint32_t mode = env->uncached_cpsr & CPSR_M;
10603 
10604     /* We can blanket copy R[0:7] to X[0:7] */
10605     for (i = 0; i < 8; i++) {
10606         env->xregs[i] = env->regs[i];
10607     }
10608 
10609     /*
10610      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
10611      * Otherwise, they come from the banked user regs.
10612      */
10613     if (mode == ARM_CPU_MODE_FIQ) {
10614         for (i = 8; i < 13; i++) {
10615             env->xregs[i] = env->usr_regs[i - 8];
10616         }
10617     } else {
10618         for (i = 8; i < 13; i++) {
10619             env->xregs[i] = env->regs[i];
10620         }
10621     }
10622 
10623     /*
10624      * Registers x13-x23 are the various mode SP and LR registers. Registers
10625      * r13 and r14 are only copied if we are in that mode, otherwise we copy
10626      * from the mode banked register.
10627      */
10628     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10629         env->xregs[13] = env->regs[13];
10630         env->xregs[14] = env->regs[14];
10631     } else {
10632         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
10633         /* HYP is an exception in that it is copied from r14 */
10634         if (mode == ARM_CPU_MODE_HYP) {
10635             env->xregs[14] = env->regs[14];
10636         } else {
10637             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
10638         }
10639     }
10640 
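    /*
     * x15 maps to the Hyp-mode SP: it is live in r13 while we are in
     * Hyp mode, otherwise it sits in the banked r13 slot for Hyp.
     */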
10641     if (mode == ARM_CPU_MODE_HYP) {
10642         env->xregs[15] = env->regs[13];
10643     } else {
10644         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
10645     }
10646 
10647     if (mode == ARM_CPU_MODE_IRQ) {
10648         env->xregs[16] = env->regs[14];
10649         env->xregs[17] = env->regs[13];
10650     } else {
10651         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
10652         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
10653     }
10654 
10655     if (mode == ARM_CPU_MODE_SVC) {
10656         env->xregs[18] = env->regs[14];
10657         env->xregs[19] = env->regs[13];
10658     } else {
10659         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
10660         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
10661     }
10662 
10663     if (mode == ARM_CPU_MODE_ABT) {
10664         env->xregs[20] = env->regs[14];
10665         env->xregs[21] = env->regs[13];
10666     } else {
10667         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
10668         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
10669     }
10670 
10671     if (mode == ARM_CPU_MODE_UND) {
10672         env->xregs[22] = env->regs[14];
10673         env->xregs[23] = env->regs[13];
10674     } else {
10675         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
10676         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
10677     }
10678 
10679     /*
10680      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10681      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
10682      * FIQ bank for r8-r14.
10683      */
10684     if (mode == ARM_CPU_MODE_FIQ) {
10685         for (i = 24; i < 31; i++) {
10686             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
10687         }
10688     } else {
10689         for (i = 24; i < 29; i++) {
10690             env->xregs[i] = env->fiq_regs[i - 24];
10691         }
10692         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
10693         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
10694     }
10695 
10696     env->pc = env->regs[15];
10697 }
10698 
10699 /*
10700  * Update QEMU's AArch32 register view from the current AArch64 register
10701  * state (xregs[], pc).  This is necessary when switching execution
10702  * state from AArch64 to AArch32; see the mapping table above.
10703  */
10704 void aarch64_sync_64_to_32(CPUARMState *env)
10705 {
10706     int i;
10707     uint32_t mode = env->uncached_cpsr & CPSR_M;
10708 
10709     /* We can blanket copy X[0:7] to R[0:7] */
10710     for (i = 0; i < 8; i++) {
10711         env->regs[i] = env->xregs[i];
10712     }
10713 
10714     /*
10715      * Unless we are in FIQ mode, x8-x12 are copied to the live r8-r12.
10716      * Otherwise, they are copied into the banked user regs.
10717      */
10718     if (mode == ARM_CPU_MODE_FIQ) {
10719         for (i = 8; i < 13; i++) {
10720             env->usr_regs[i - 8] = env->xregs[i];
10721         }
10722     } else {
10723         for (i = 8; i < 13; i++) {
10724             env->regs[i] = env->xregs[i];
10725         }
10726     }
10727 
10728     /*
10729      * Registers r13 & r14 depend on the current mode.
10730      * If we are in a given mode, we copy the corresponding x registers to r13
10731      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
10732      * for the mode.
10733      */
10734     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10735         env->regs[13] = env->xregs[13];
10736         env->regs[14] = env->xregs[14];
10737     } else {
10738         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
10739 
10740         /*
10741          * HYP is an exception in that it does not have its own banked r14 but
10742          * shares the USR r14
10743          */
10744         if (mode == ARM_CPU_MODE_HYP) {
10745             env->regs[14] = env->xregs[14];
10746         } else {
10747             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
10748         }
10749     }
10750 
10751     if (mode == ARM_CPU_MODE_HYP) {
10752         env->regs[13] = env->xregs[15];
10753     } else {
10754         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
10755     }
10756 
10757     if (mode == ARM_CPU_MODE_IRQ) {
10758         env->regs[14] = env->xregs[16];
10759         env->regs[13] = env->xregs[17];
10760     } else {
10761         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
10762         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
10763     }
10764 
10765     if (mode == ARM_CPU_MODE_SVC) {
10766         env->regs[14] = env->xregs[18];
10767         env->regs[13] = env->xregs[19];
10768     } else {
10769         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
10770         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
10771     }
10772 
10773     if (mode == ARM_CPU_MODE_ABT) {
10774         env->regs[14] = env->xregs[20];
10775         env->regs[13] = env->xregs[21];
10776     } else {
10777         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
10778         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
10779     }
10780 
10781     if (mode == ARM_CPU_MODE_UND) {
10782         env->regs[14] = env->xregs[22];
10783         env->regs[13] = env->xregs[23];
10784     } else {
10785         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
10786         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
10787     }
10788 
10789     /*
10790      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10791      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
10792      * FIQ bank for r8-r14.
10793      */
10794     if (mode == ARM_CPU_MODE_FIQ) {
10795         for (i = 24; i < 31; i++) {
10796             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
10797         }
10798     } else {
10799         for (i = 24; i < 29; i++) {
10800             env->fiq_regs[i - 24] = env->xregs[i];
10801         }
10802         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
10803         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
10804     }
10805 
10806     env->regs[15] = env->pc;
10807 }
10808 
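/*
 * Enter an AArch32 exception handler: switch to @new_mode, set the CPSR
 * bits in @mask, save the old CPSR into the new mode's SPSR, set the new
 * mode's LR to the old PC plus @offset (except for Hyp mode, which uses
 * ELR_EL2 instead), and branch to the vector address @newpc.
 */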
10809 static void take_aarch32_exception(CPUARMState *env, int new_mode,
10810                                    uint32_t mask, uint32_t offset,
10811                                    uint32_t newpc)
10812 {
10813     int new_el;
10814 
10815     /* Change the CPU state so as to actually take the exception. */
10816     switch_mode(env, new_mode);
10817 
10818     /*
10819      * For exceptions taken to AArch32 we must clear the SS bit in both
10820      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
10821      */
10822     env->pstate &= ~PSTATE_SS;
10823     env->spsr = cpsr_read(env);
10824     /* Clear IT bits.  */
10825     env->condexec_bits = 0;
10826     /* Switch to the new mode, and to the correct instruction set.  */
10827     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
10828 
10829     /* This must be after mode switching. */
10830     new_el = arm_current_el(env);
10831 
10832     /* Set new mode endianness */
10833     env->uncached_cpsr &= ~CPSR_E;
10834     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
10835         env->uncached_cpsr |= CPSR_E;
10836     }
10837     /* J and IL must always be cleared for exception entry */
10838     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
10839     env->daif |= mask;
10840 
10841     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
10842         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
10843             env->uncached_cpsr |= CPSR_SSBS;
10844         } else {
10845             env->uncached_cpsr &= ~CPSR_SSBS;
10846         }
10847     }
10848 
10849     if (new_mode == ARM_CPU_MODE_HYP) {
10850         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
10851         env->elr_el[2] = env->regs[15];
10852     } else {
10853         /* CPSR.PAN is normally preserved unless...  */
10854         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
10855             switch (new_el) {
10856             case 3:
10857                 if (!arm_is_secure_below_el3(env)) {
10858                     /* ... the target is EL3, from non-secure state.  */
10859                     env->uncached_cpsr &= ~CPSR_PAN;
10860                     break;
10861                 }
10862                 /* ... the target is EL3, from secure state ... */
10863                 /* fall through */
10864             case 1:
10865                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
10866                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
10867                     env->uncached_cpsr |= CPSR_PAN;
10868                 }
10869                 break;
10870             }
10871         }
10872         /*
10873          * This is a lie: there was no c1_sys on V4T/V5.  But nobody cares,
10874          * and really we should only guard the Thumb check on V4.
10875          */
10876         if (arm_feature(env, ARM_FEATURE_V4T)) {
10877             env->thumb =
10878                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
10879         }
10880         env->regs[14] = env->regs[15] + offset;
10881     }
10882     env->regs[15] = newpc;
10883 
10884     if (tcg_enabled()) {
10885         arm_rebuild_hflags(env);
10886     }
10887 }
10888 
10889 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
10890 {
10891     /*
10892      * Handle exception entry to Hyp mode; this is sufficiently
10893      * different to entry to other AArch32 modes that we handle it
10894      * separately here.
10895      *
10896      * The vector table entry used is always the 0x14 Hyp mode entry point,
10897      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
10898      * The offset applied to the preferred return address is always zero
10899      * (see DDI0487C.a section G1.12.3).
10900      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
10901      */
10902     uint32_t addr, mask;
10903     ARMCPU *cpu = ARM_CPU(cs);
10904     CPUARMState *env = &cpu->env;
10905 
10906     switch (cs->exception_index) {
10907     case EXCP_UDEF:
10908         addr = 0x04;
10909         break;
10910     case EXCP_SWI:
10911         addr = 0x08;
10912         break;
10913     case EXCP_BKPT:
10914         /* Fall through to prefetch abort.  */
10915     case EXCP_PREFETCH_ABORT:
10916         env->cp15.ifar_s = env->exception.vaddress;
10917         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
10918                       (uint32_t)env->exception.vaddress);
10919         addr = 0x0c;
10920         break;
10921     case EXCP_DATA_ABORT:
10922         env->cp15.dfar_s = env->exception.vaddress;
10923         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
10924                       (uint32_t)env->exception.vaddress);
10925         addr = 0x10;
10926         break;
10927     case EXCP_IRQ:
10928         addr = 0x18;
10929         break;
10930     case EXCP_FIQ:
10931         addr = 0x1c;
10932         break;
10933     case EXCP_HVC:
10934         addr = 0x08;
10935         break;
10936     case EXCP_HYP_TRAP:
10937         addr = 0x14;
10938         break;
10939     default:
10940         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10941     }
10942 
10943     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
10944         if (!arm_feature(env, ARM_FEATURE_V8)) {
10945             /*
10946              * QEMU syndrome values are v8-style. v7 has the IL bit
10947              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
10948              * If this is a v7 CPU, squash the IL bit in those cases.
10949              */
10950             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
10951                 (cs->exception_index == EXCP_DATA_ABORT &&
10952                  !(env->exception.syndrome & ARM_EL_ISV)) ||
10953                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
10954                 env->exception.syndrome &= ~ARM_EL_IL;
10955             }
10956         }
10957         env->cp15.esr_el[2] = env->exception.syndrome;
10958     }
10959 
10960     if (arm_current_el(env) != 2 && addr < 0x14) {
10961         addr = 0x14;
10962     }
10963 
10964     mask = 0;
10965     if (!(env->cp15.scr_el3 & SCR_EA)) {
10966         mask |= CPSR_A;
10967     }
10968     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
10969         mask |= CPSR_I;
10970     }
10971     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
10972         mask |= CPSR_F;
10973     }
10974 
10975     addr += env->cp15.hvbar;
10976 
10977     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
10978 }
10979 
10980 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
10981 {
10982     ARMCPU *cpu = ARM_CPU(cs);
10983     CPUARMState *env = &cpu->env;
10984     uint32_t addr;
10985     uint32_t mask;
10986     int new_mode;
10987     uint32_t offset;
10988     uint32_t moe;
10989 
10990     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
10991     switch (syn_get_ec(env->exception.syndrome)) {
10992     case EC_BREAKPOINT:
10993     case EC_BREAKPOINT_SAME_EL:
10994         moe = 1;
10995         break;
10996     case EC_WATCHPOINT:
10997     case EC_WATCHPOINT_SAME_EL:
10998         moe = 10;
10999         break;
11000     case EC_AA32_BKPT:
11001         moe = 3;
11002         break;
11003     case EC_VECTORCATCH:
11004         moe = 5;
11005         break;
11006     default:
11007         moe = 0;
11008         break;
11009     }
11010 
11011     if (moe) {
11012         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
11013     }
11014 
11015     if (env->exception.target_el == 2) {
11016         arm_cpu_do_interrupt_aarch32_hyp(cs);
11017         return;
11018     }
11019 
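    /*
     * Classic A/R-profile vector table layout, as used by the cases below:
     *   0x04 Undefined, 0x08 SVC/SMC, 0x0c Prefetch Abort, 0x10 Data Abort,
     *   0x18 IRQ, 0x1c FIQ.  (0x00 is Reset; 0x14 is reserved.)
     */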
11020     switch (cs->exception_index) {
11021     case EXCP_UDEF:
11022         new_mode = ARM_CPU_MODE_UND;
11023         addr = 0x04;
11024         mask = CPSR_I;
11025         if (env->thumb) {
11026             offset = 2;
11027         } else {
11028             offset = 4;
11029         }
11030         break;
11031     case EXCP_SWI:
11032         new_mode = ARM_CPU_MODE_SVC;
11033         addr = 0x08;
11034         mask = CPSR_I;
11035         /* The PC already points to the next instruction.  */
11036         offset = 0;
11037         break;
11038     case EXCP_BKPT:
11039         /* Fall through to prefetch abort.  */
11040     case EXCP_PREFETCH_ABORT:
11041         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
11042         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
11043         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
11044                       env->exception.fsr, (uint32_t)env->exception.vaddress);
11045         new_mode = ARM_CPU_MODE_ABT;
11046         addr = 0x0c;
11047         mask = CPSR_A | CPSR_I;
11048         offset = 4;
11049         break;
11050     case EXCP_DATA_ABORT:
11051         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11052         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
11053         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
11054                       env->exception.fsr,
11055                       (uint32_t)env->exception.vaddress);
11056         new_mode = ARM_CPU_MODE_ABT;
11057         addr = 0x10;
11058         mask = CPSR_A | CPSR_I;
11059         offset = 8;
11060         break;
11061     case EXCP_IRQ:
11062         new_mode = ARM_CPU_MODE_IRQ;
11063         addr = 0x18;
11064         /* Disable IRQ and imprecise data aborts.  */
11065         mask = CPSR_A | CPSR_I;
11066         offset = 4;
11067         if (env->cp15.scr_el3 & SCR_IRQ) {
11068             /* IRQ routed to monitor mode */
11069             new_mode = ARM_CPU_MODE_MON;
11070             mask |= CPSR_F;
11071         }
11072         break;
11073     case EXCP_FIQ:
11074         new_mode = ARM_CPU_MODE_FIQ;
11075         addr = 0x1c;
11076         /* Disable FIQ, IRQ and imprecise data aborts.  */
11077         mask = CPSR_A | CPSR_I | CPSR_F;
11078         if (env->cp15.scr_el3 & SCR_FIQ) {
11079             /* FIQ routed to monitor mode */
11080             new_mode = ARM_CPU_MODE_MON;
11081         }
11082         offset = 4;
11083         break;
11084     case EXCP_VIRQ:
11085         new_mode = ARM_CPU_MODE_IRQ;
11086         addr = 0x18;
11087         /* Disable IRQ and imprecise data aborts.  */
11088         mask = CPSR_A | CPSR_I;
11089         offset = 4;
11090         break;
11091     case EXCP_VFIQ:
11092         new_mode = ARM_CPU_MODE_FIQ;
11093         addr = 0x1c;
11094         /* Disable FIQ, IRQ and imprecise data aborts.  */
11095         mask = CPSR_A | CPSR_I | CPSR_F;
11096         offset = 4;
11097         break;
11098     case EXCP_VSERR:
11099         {
11100             /*
11101              * Note that this is reported as a data abort, but the DFAR
11102              * has an UNKNOWN value.  Construct the SError syndrome from
11103              * AET and ExT fields.
11104              */
11105             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
11106 
11107             if (extended_addresses_enabled(env)) {
11108                 env->exception.fsr = arm_fi_to_lfsc(&fi);
11109             } else {
11110                 env->exception.fsr = arm_fi_to_sfsc(&fi);
11111             }
11112             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
11113             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11114             qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
11115                           env->exception.fsr);
11116 
11117             new_mode = ARM_CPU_MODE_ABT;
11118             addr = 0x10;
11119             mask = CPSR_A | CPSR_I;
11120             offset = 8;
11121         }
11122         break;
11123     case EXCP_SMC:
11124         new_mode = ARM_CPU_MODE_MON;
11125         addr = 0x08;
11126         mask = CPSR_A | CPSR_I | CPSR_F;
11127         offset = 0;
11128         break;
11129     default:
11130         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11131         return; /* Never happens.  Keep compiler happy.  */
11132     }
11133 
11134     if (new_mode == ARM_CPU_MODE_MON) {
11135         addr += env->cp15.mvbar;
11136     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
11137         /* High vectors. When enabled, base address cannot be remapped. */
11138         addr += 0xffff0000;
11139     } else {
11140         /*
11141          * ARMv7 provides a vector base address register (VBAR) with which
11142          * the exception vector table can be remapped.  It is honoured only
11143          * in non-monitor modes, and is banked between the security states.
11144          * Note: only bits 31:5 are valid.
11145          */
11146         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
11147     }
11148 
11149     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
11150         env->cp15.scr_el3 &= ~SCR_NS;
11151     }
11152 
11153     take_aarch32_exception(env, new_mode, mask, offset, addr);
11154 }
11155 
11156 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
11157 {
11158     /*
11159      * Return the register number of the AArch64 view of the AArch32
11160      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
11161      * be that of the AArch32 mode the exception came from.
11162      */
11163     int mode = env->uncached_cpsr & CPSR_M;
11164 
11165     switch (aarch32_reg) {
11166     case 0 ... 7:
11167         return aarch32_reg;
11168     case 8 ... 12:
11169         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
11170     case 13:
11171         switch (mode) {
11172         case ARM_CPU_MODE_USR:
11173         case ARM_CPU_MODE_SYS:
11174             return 13;
11175         case ARM_CPU_MODE_HYP:
11176             return 15;
11177         case ARM_CPU_MODE_IRQ:
11178             return 17;
11179         case ARM_CPU_MODE_SVC:
11180             return 19;
11181         case ARM_CPU_MODE_ABT:
11182             return 21;
11183         case ARM_CPU_MODE_UND:
11184             return 23;
11185         case ARM_CPU_MODE_FIQ:
11186             return 29;
11187         default:
11188             g_assert_not_reached();
11189         }
11190     case 14:
11191         switch (mode) {
11192         case ARM_CPU_MODE_USR:
11193         case ARM_CPU_MODE_SYS:
11194         case ARM_CPU_MODE_HYP:
11195             return 14;
11196         case ARM_CPU_MODE_IRQ:
11197             return 16;
11198         case ARM_CPU_MODE_SVC:
11199             return 18;
11200         case ARM_CPU_MODE_ABT:
11201             return 20;
11202         case ARM_CPU_MODE_UND:
11203             return 22;
11204         case ARM_CPU_MODE_FIQ:
11205             return 30;
11206         default:
11207             g_assert_not_reached();
11208         }
11209     case 15:
11210         return 31;
11211     default:
11212         g_assert_not_reached();
11213     }
11214 }
11215 
11216 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
11217 {
11218     uint32_t ret = cpsr_read(env);
11219 
11220     /* Move DIT to the correct location for SPSR_ELx */
11221     if (ret & CPSR_DIT) {
11222         ret &= ~CPSR_DIT;
11223         ret |= PSTATE_DIT;
11224     }
11225     /* Merge PSTATE.SS into SPSR_ELx */
11226     ret |= env->pstate & PSTATE_SS;
11227 
11228     return ret;
11229 }
11230 
11231 static bool syndrome_is_sync_extabt(uint32_t syndrome)
11232 {
11233     /* Return true if this syndrome value is a synchronous external abort */
11234     switch (syn_get_ec(syndrome)) {
11235     case EC_INSNABORT:
11236     case EC_INSNABORT_SAME_EL:
11237     case EC_DATAABORT:
11238     case EC_DATAABORT_SAME_EL:
11239         /* Look at fault status code for all the synchronous ext abort cases */
11240         switch (syndrome & 0x3f) {
11241         case 0x10: /* Synchronous external abort, not on table walk */
11242         case 0x13: /* Sync ext abort on table walk, level -1 (FEAT_LPA2) */
11243         case 0x14: /* Sync ext abort on table walk, level 0 */
11244         case 0x15: /* Sync ext abort on table walk, level 1 */
11245         case 0x16: /* Sync ext abort on table walk, level 2 */
11246         case 0x17: /* Sync ext abort on table walk, level 3 */
11247             return true;
11248         default:
11249             return false;
11250         }
11251     default:
11252         return false;
11253     }
11254 }
11255 
11256 /* Handle exception entry to a target EL which is using AArch64 */
11257 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
11258 {
11259     ARMCPU *cpu = ARM_CPU(cs);
11260     CPUARMState *env = &cpu->env;
11261     unsigned int new_el = env->exception.target_el;
11262     target_ulong addr = env->cp15.vbar_el[new_el];
11263     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
11264     unsigned int old_mode;
11265     unsigned int cur_el = arm_current_el(env);
11266     int rt;
11267 
11268     if (tcg_enabled()) {
11269         /*
11270          * Note that new_el can never be 0.  If cur_el is 0, then
11271          * el0_a64 is is_a64(), else el0_a64 is ignored.
11272          */
11273         aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
11274     }
11275 
11276     if (cur_el < new_el) {
11277         /*
11278          * Entry vector offset depends on whether the implemented EL
11279          * immediately lower than the target level is using AArch32 or AArch64
11280          */
11281         bool is_aa64;
11282         uint64_t hcr;
11283 
11284         switch (new_el) {
11285         case 3:
11286             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
11287             break;
11288         case 2:
11289             hcr = arm_hcr_el2_eff(env);
11290             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11291                 is_aa64 = (hcr & HCR_RW) != 0;
11292                 break;
11293             }
11294             /* fall through */
11295         case 1:
11296             is_aa64 = is_a64(env);
11297             break;
11298         default:
11299             g_assert_not_reached();
11300         }
11301 
11302         if (is_aa64) {
11303             addr += 0x400;
11304         } else {
11305             addr += 0x600;
11306         }
11307     } else if (pstate_read(env) & PSTATE_SP) {
11308         addr += 0x200;
11309     }
11310 
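    /*
     * addr now holds the base of the selected group of four vectors:
     *   VBAR + 0x000 : exception from the current EL using SP_EL0
     *   VBAR + 0x200 : exception from the current EL using SP_ELx
     *   VBAR + 0x400 : exception from a lower EL using AArch64
     *   VBAR + 0x600 : exception from a lower EL using AArch32
     * The switch below adds the entry's offset within that group:
     * +0x000 synchronous, +0x080 IRQ/vIRQ, +0x100 FIQ/vFIQ, +0x180 SError.
     */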
11311     switch (cs->exception_index) {
11312     case EXCP_GPC:
11313         qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
11314                       env->cp15.mfar_el3);
11315         /* fall through */
11316     case EXCP_PREFETCH_ABORT:
11317     case EXCP_DATA_ABORT:
11318         /*
11319          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
11320          * to be taken to the SError vector entrypoint.
11321          */
11322         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
11323             syndrome_is_sync_extabt(env->exception.syndrome)) {
11324             addr += 0x180;
11325         }
11326         env->cp15.far_el[new_el] = env->exception.vaddress;
11327         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
11328                       env->cp15.far_el[new_el]);
11329         /* fall through */
11330     case EXCP_BKPT:
11331     case EXCP_UDEF:
11332     case EXCP_SWI:
11333     case EXCP_HVC:
11334     case EXCP_HYP_TRAP:
11335     case EXCP_SMC:
11336         switch (syn_get_ec(env->exception.syndrome)) {
11337         case EC_ADVSIMDFPACCESSTRAP:
11338             /*
11339              * QEMU internal FP/SIMD syndromes from AArch32 include the
11340              * TA and coproc fields which are only exposed if the exception
11341              * is taken to AArch32 Hyp mode. Mask them out to get a valid
11342              * AArch64 format syndrome.
11343              */
11344             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
11345             break;
11346         case EC_CP14RTTRAP:
11347         case EC_CP15RTTRAP:
11348         case EC_CP14DTTRAP:
11349             /*
11350              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
11351              * the raw register field from the insn; when taking this to
11352              * AArch64 we must convert it to the AArch64 view of the register
11353              * number. Notice that we read a 4-bit AArch32 register number and
11354              * write back a 5-bit AArch64 one.
11355              */
11356             rt = extract32(env->exception.syndrome, 5, 4);
11357             rt = aarch64_regnum(env, rt);
11358             env->exception.syndrome = deposit32(env->exception.syndrome,
11359                                                 5, 5, rt);
11360             break;
11361         case EC_CP15RRTTRAP:
11362         case EC_CP14RRTTRAP:
11363             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
11364             rt = extract32(env->exception.syndrome, 5, 4);
11365             rt = aarch64_regnum(env, rt);
11366             env->exception.syndrome = deposit32(env->exception.syndrome,
11367                                                 5, 5, rt);
11368             rt = extract32(env->exception.syndrome, 10, 4);
11369             rt = aarch64_regnum(env, rt);
11370             env->exception.syndrome = deposit32(env->exception.syndrome,
11371                                                 10, 5, rt);
11372             break;
11373         }
11374         env->cp15.esr_el[new_el] = env->exception.syndrome;
11375         break;
11376     case EXCP_IRQ:
11377     case EXCP_VIRQ:
11378         addr += 0x80;
11379         break;
11380     case EXCP_FIQ:
11381     case EXCP_VFIQ:
11382         addr += 0x100;
11383         break;
11384     case EXCP_VSERR:
11385         addr += 0x180;
11386         /* Construct the SError syndrome from IDS and ISS fields. */
11387         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
11388         env->cp15.esr_el[new_el] = env->exception.syndrome;
11389         break;
11390     default:
11391         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11392     }
11393 
11394     if (is_a64(env)) {
11395         old_mode = pstate_read(env);
11396         aarch64_save_sp(env, arm_current_el(env));
11397         env->elr_el[new_el] = env->pc;
11398 
11399         if (cur_el == 1 && new_el == 1) {
11400             uint64_t hcr = arm_hcr_el2_eff(env);
11401             if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
11402                 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
11403                 /*
11404                  * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
11405                  * by setting M[3:2] to 0b10.
11406                  * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
11407                  * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
11408                  */
11409                 old_mode = deposit32(old_mode, 2, 2, 2);
11410             }
11411         }
11412     } else {
11413         old_mode = cpsr_read_for_spsr_elx(env);
11414         env->elr_el[new_el] = env->regs[15];
11415 
11416         aarch64_sync_32_to_64(env);
11417 
11418         env->condexec_bits = 0;
11419     }
11420     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
11421 
11422     qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
11423     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
11424                   env->elr_el[new_el]);
11425 
11426     if (cpu_isar_feature(aa64_pan, cpu)) {
11427         /* The value of PSTATE.PAN is normally preserved, except when ... */
11428         new_mode |= old_mode & PSTATE_PAN;
11429         switch (new_el) {
11430         case 2:
11431             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
11432             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
11433                 != (HCR_E2H | HCR_TGE)) {
11434                 break;
11435             }
11436             /* fall through */
11437         case 1:
11438             /* ... the target is EL1 ... */
11439             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
11440             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
11441                 new_mode |= PSTATE_PAN;
11442             }
11443             break;
11444         }
11445     }
11446     if (cpu_isar_feature(aa64_mte, cpu)) {
11447         new_mode |= PSTATE_TCO;
11448     }
11449 
11450     if (cpu_isar_feature(aa64_ssbs, cpu)) {
11451         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
11452             new_mode |= PSTATE_SSBS;
11453         } else {
11454             new_mode &= ~PSTATE_SSBS;
11455         }
11456     }
11457 
11458     pstate_write(env, PSTATE_DAIF | new_mode);
11459     env->aarch64 = true;
11460     aarch64_restore_sp(env, new_el);
11461 
11462     if (tcg_enabled()) {
11463         helper_rebuild_hflags_a64(env, new_el);
11464     }
11465 
11466     env->pc = addr;
11467 
11468     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
11469                   new_el, env->pc, pstate_read(env));
11470 }
11471 
11472 /*
11473  * Do semihosting call and set the appropriate return value. All the
11474  * permission and validity checks have been done at translate time.
11475  *
11476  * We only see semihosting exceptions in TCG, as they are not
11477  * trapped to the hypervisor in KVM.
11478  */
11479 #ifdef CONFIG_TCG
11480 static void tcg_handle_semihosting(CPUState *cs)
11481 {
11482     ARMCPU *cpu = ARM_CPU(cs);
11483     CPUARMState *env = &cpu->env;
11484 
11485     if (is_a64(env)) {
11486         qemu_log_mask(CPU_LOG_INT,
11487                       "...handling as semihosting call 0x%" PRIx64 "\n",
11488                       env->xregs[0]);
11489         do_common_semihosting(cs);
11490         env->pc += 4;
11491     } else {
11492         qemu_log_mask(CPU_LOG_INT,
11493                       "...handling as semihosting call 0x%x\n",
11494                       env->regs[0]);
11495         do_common_semihosting(cs);
11496         env->regs[15] += env->thumb ? 2 : 4;
11497     }
11498 }
11499 #endif
11500 
11501 /*
11502  * Handle a CPU exception for A and R profile CPUs.
11503  * Do any appropriate logging, handle PSCI calls, and then hand off
11504  * to the AArch64-entry or AArch32-entry function depending on the
11505  * target exception level's register width.
11506  *
11507  * Note: this is used both by TCG (as the do_interrupt tcg op)
11508  *       and by KVM, to re-inject guest debug exceptions and to
11509  *       inject a Synchronous External Abort.
11510  */
11511 void arm_cpu_do_interrupt(CPUState *cs)
11512 {
11513     ARMCPU *cpu = ARM_CPU(cs);
11514     CPUARMState *env = &cpu->env;
11515     unsigned int new_el = env->exception.target_el;
11516 
11517     assert(!arm_feature(env, ARM_FEATURE_M));
11518 
11519     arm_log_exception(cs);
11520     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
11521                   new_el);
11522     if (qemu_loglevel_mask(CPU_LOG_INT)
11523         && !excp_is_internal(cs->exception_index)) {
11524         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
11525                       syn_get_ec(env->exception.syndrome),
11526                       env->exception.syndrome);
11527     }
11528 
11529     if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
11530         arm_handle_psci_call(cpu);
11531         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
11532         return;
11533     }
11534 
11535     /*
11536      * Semihosting semantics depend on the register width of the code
11537      * that caused the exception, not the target exception level, so
11538      * must be handled here.
11539      */
11540 #ifdef CONFIG_TCG
11541     if (cs->exception_index == EXCP_SEMIHOST) {
11542         tcg_handle_semihosting(cs);
11543         return;
11544     }
11545 #endif
11546 
11547     /*
11548      * Hooks may change global state, so the BQL must be held; it is
11549      * likewise needed for any modification of
11550      * cs->interrupt_request.
11551      */
11552     g_assert(bql_locked());
11553 
11554     arm_call_pre_el_change_hook(cpu);
11555 
11556     assert(!excp_is_internal(cs->exception_index));
11557     if (arm_el_is_aa64(env, new_el)) {
11558         arm_cpu_do_interrupt_aarch64(cs);
11559     } else {
11560         arm_cpu_do_interrupt_aarch32(cs);
11561     }
11562 
11563     arm_call_el_change_hook(cpu);
11564 
11565     if (!kvm_enabled()) {
11566         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
11567     }
11568 }
11569 #endif /* !CONFIG_USER_ONLY */
11570 
11571 uint64_t arm_sctlr(CPUARMState *env, int el)
11572 {
11573     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
11574     if (el == 0) {
11575         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
11576         el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
11577     }
11578     return env->cp15.sctlr_el[el];
11579 }
11580 
11581 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
11582 {
11583     if (regime_has_2_ranges(mmu_idx)) {
11584         return extract64(tcr, 37, 2);
11585     } else if (regime_is_stage2(mmu_idx)) {
11586         return 0; /* VTCR_EL2 */
11587     } else {
11588         /* Replicate the single TBI bit so we always have 2 bits.  */
11589         return extract32(tcr, 20, 1) * 3;
11590     }
11591 }
11592 
11593 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
11594 {
11595     if (regime_has_2_ranges(mmu_idx)) {
11596         return extract64(tcr, 51, 2);
11597     } else if (regime_is_stage2(mmu_idx)) {
11598         return 0; /* VTCR_EL2 */
11599     } else {
11600         /* Replicate the single TBID bit so we always have 2 bits.  */
11601         return extract32(tcr, 29, 1) * 3;
11602     }
11603 }
11604 
11605 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
11606 {
11607     if (regime_has_2_ranges(mmu_idx)) {
11608         return extract64(tcr, 57, 2);
11609     } else {
11610         /* Replicate the single TCMA bit so we always have 2 bits.  */
11611         return extract32(tcr, 30, 1) * 3;
11612     }
11613 }
11614 
11615 static ARMGranuleSize tg0_to_gran_size(int tg)
11616 {
11617     switch (tg) {
11618     case 0:
11619         return Gran4K;
11620     case 1:
11621         return Gran64K;
11622     case 2:
11623         return Gran16K;
11624     default:
11625         return GranInvalid;
11626     }
11627 }
11628 
11629 static ARMGranuleSize tg1_to_gran_size(int tg)
11630 {
11631     switch (tg) {
11632     case 1:
11633         return Gran16K;
11634     case 2:
11635         return Gran4K;
11636     case 3:
11637         return Gran64K;
11638     default:
11639         return GranInvalid;
11640     }
11641 }
11642 
11643 static inline bool have4k(ARMCPU *cpu, bool stage2)
11644 {
11645     return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
11646         : cpu_isar_feature(aa64_tgran4, cpu);
11647 }
11648 
11649 static inline bool have16k(ARMCPU *cpu, bool stage2)
11650 {
11651     return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
11652         : cpu_isar_feature(aa64_tgran16, cpu);
11653 }
11654 
11655 static inline bool have64k(ARMCPU *cpu, bool stage2)
11656 {
11657     return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
11658         : cpu_isar_feature(aa64_tgran64, cpu);
11659 }
11660 
11661 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
11662                                          bool stage2)
11663 {
11664     switch (gran) {
11665     case Gran4K:
11666         if (have4k(cpu, stage2)) {
11667             return gran;
11668         }
11669         break;
11670     case Gran16K:
11671         if (have16k(cpu, stage2)) {
11672             return gran;
11673         }
11674         break;
11675     case Gran64K:
11676         if (have64k(cpu, stage2)) {
11677             return gran;
11678         }
11679         break;
11680     case GranInvalid:
11681         break;
11682     }
11683     /*
11684      * If the guest selects a granule size that isn't implemented,
11685      * the architecture requires that we behave as if it selected one
11686      * that is (with an IMPDEF choice of which one to pick). We choose
11687      * to implement the smallest supported granule size.
11688      */
11689     if (have4k(cpu, stage2)) {
11690         return Gran4K;
11691     }
11692     if (have16k(cpu, stage2)) {
11693         return Gran16K;
11694     }
11695     assert(have64k(cpu, stage2));
11696     return Gran64K;
11697 }
11698 
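/*
 * Decode the translation regime parameters that apply to virtual address
 * @va under @mmu_idx: TnSZ, granule size, TBI, EPD/HPD, shareability,
 * PA size, HA/HD and DS, sanitizing out-of-range or unimplemented values
 * as the architecture requires.
 */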
11699 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11700                                    ARMMMUIdx mmu_idx, bool data,
11701                                    bool el1_is_aa32)
11702 {
11703     uint64_t tcr = regime_tcr(env, mmu_idx);
11704     bool epd, hpd, tsz_oob, ds, ha, hd;
11705     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
11706     ARMGranuleSize gran;
11707     ARMCPU *cpu = env_archcpu(env);
11708     bool stage2 = regime_is_stage2(mmu_idx);
11709 
11710     if (!regime_has_2_ranges(mmu_idx)) {
11711         select = 0;
11712         tsz = extract32(tcr, 0, 6);
11713         gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11714         if (stage2) {
11715             /* VTCR_EL2 */
11716             hpd = false;
11717         } else {
11718             hpd = extract32(tcr, 24, 1);
11719         }
11720         epd = false;
11721         sh = extract32(tcr, 12, 2);
11722         ps = extract32(tcr, 16, 3);
11723         ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
11724         hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11725         ds = extract64(tcr, 32, 1);
11726     } else {
11727         bool e0pd;
11728 
11729         /*
11730          * Bit 55 is always between the two regions, and is canonical for
11731          * determining if address tagging is enabled.
11732          */
11733         select = extract64(va, 55, 1);
11734         if (!select) {
11735             tsz = extract32(tcr, 0, 6);
11736             gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11737             epd = extract32(tcr, 7, 1);
11738             sh = extract32(tcr, 12, 2);
11739             hpd = extract64(tcr, 41, 1);
11740             e0pd = extract64(tcr, 55, 1);
11741         } else {
11742             tsz = extract32(tcr, 16, 6);
11743             gran = tg1_to_gran_size(extract32(tcr, 30, 2));
11744             epd = extract32(tcr, 23, 1);
11745             sh = extract32(tcr, 28, 2);
11746             hpd = extract64(tcr, 42, 1);
11747             e0pd = extract64(tcr, 56, 1);
11748         }
11749         ps = extract64(tcr, 32, 3);
11750         ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
11751         hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11752         ds = extract64(tcr, 59, 1);
11753 
11754         if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
11755             regime_is_user(env, mmu_idx)) {
11756             epd = true;
11757         }
11758     }
11759 
11760     gran = sanitize_gran_size(cpu, gran, stage2);
11761 
11762     if (cpu_isar_feature(aa64_st, cpu)) {
11763         max_tsz = 48 - (gran == Gran64K);
11764     } else {
11765         max_tsz = 39;
11766     }
11767 
11768     /*
11769      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
11770      * adjust the effective value of DS, as documented.
11771      */
11772     min_tsz = 16;
11773     if (gran == Gran64K) {
11774         if (cpu_isar_feature(aa64_lva, cpu)) {
11775             min_tsz = 12;
11776         }
11777         ds = false;
11778     } else if (ds) {
11779         if (regime_is_stage2(mmu_idx)) {
11780             if (gran == Gran16K) {
11781                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
11782             } else {
11783                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
11784             }
11785         } else {
11786             if (gran == Gran16K) {
11787                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
11788             } else {
11789                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
11790             }
11791         }
11792         if (ds) {
11793             min_tsz = 12;
11794         }
11795     }
11796 
11797     if (stage2 && el1_is_aa32) {
11798         /*
11799          * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
11800          * are loosened: a configured IPA of 40 bits is permitted even if
11801          * the implemented PA is less than that (and so a 40 bit IPA would
11802          * fault for an AArch64 EL1). See R_DTLMN.
11803          */
11804         min_tsz = MIN(min_tsz, 24);
11805     }
11806 
11807     if (tsz > max_tsz) {
11808         tsz = max_tsz;
11809         tsz_oob = true;
11810     } else if (tsz < min_tsz) {
11811         tsz = min_tsz;
11812         tsz_oob = true;
11813     } else {
11814         tsz_oob = false;
11815     }
11816 
11817     /* Present TBI as a composite with TBID.  */
11818     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
11819     if (!data) {
11820         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
11821     }
11822     tbi = (tbi >> select) & 1;
11823 
11824     return (ARMVAParameters) {
11825         .tsz = tsz,
11826         .ps = ps,
11827         .sh = sh,
11828         .select = select,
11829         .tbi = tbi,
11830         .epd = epd,
11831         .hpd = hpd,
11832         .tsz_oob = tsz_oob,
11833         .ds = ds,
11834         .ha = ha,
11835         .hd = ha && hd,
11836         .gran = gran,
11837     };
11838 }
11839 
11840 /*
11841  * Note that signed overflow is undefined in C.  The following routines are
11842  * careful to use unsigned types where modulo arithmetic is required.
11843  * Failure to do so _will_ break on newer gcc.
11844  */
11845 
11846 /* Signed saturating arithmetic.  */
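/*
 * Overflow detection via sign bits: for addition, signed overflow occurs
 * iff both operands have the same sign and the result's sign differs;
 * for subtraction, iff the operands have opposite signs and the result's
 * sign differs from the minuend's.  On overflow the result saturates to
 * the most negative or most positive value of the element width.
 */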
11847 
11848 /* Perform 16-bit signed saturating addition.  */
11849 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
11850 {
11851     uint16_t res;
11852 
11853     res = a + b;
11854     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
11855         if (a & 0x8000) {
11856             res = 0x8000;
11857         } else {
11858             res = 0x7fff;
11859         }
11860     }
11861     return res;
11862 }
11863 
11864 /* Perform 8-bit signed saturating addition.  */
11865 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
11866 {
11867     uint8_t res;
11868 
11869     res = a + b;
11870     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
11871         if (a & 0x80) {
11872             res = 0x80;
11873         } else {
11874             res = 0x7f;
11875         }
11876     }
11877     return res;
11878 }
11879 
11880 /* Perform 16-bit signed saturating subtraction.  */
11881 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
11882 {
11883     uint16_t res;
11884 
11885     res = a - b;
11886     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
11887         if (a & 0x8000) {
11888             res = 0x8000;
11889         } else {
11890             res = 0x7fff;
11891         }
11892     }
11893     return res;
11894 }
11895 
11896 /* Perform 8-bit signed saturating subtraction.  */
11897 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
11898 {
11899     uint8_t res;
11900 
11901     res = a - b;
11902     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
11903         if (a & 0x80) {
11904             res = 0x80;
11905         } else {
11906             res = 0x7f;
11907         }
11908     }
11909     return res;
11910 }
11911 
11912 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
11913 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
11914 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
11915 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
11916 #define PFX q
11917 
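/*
 * op_addsub.h is a template: each inclusion below expands the
 * ADD16/SUB16/ADD8/SUB8 macros defined just above it into one flavour of
 * the packed parallel add/subtract helpers, named with the PFX prefix
 * (here the saturating "q" forms, e.g. qadd16), then undefines the macros
 * ready for the next instantiation.
 */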
11918 #include "op_addsub.h"
11919 
11920 /* Unsigned saturating arithmetic.  */
11921 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
11922 {
11923     uint16_t res;
11924     res = a + b;
11925     if (res < a) {
11926         res = 0xffff;
11927     }
11928     return res;
11929 }
11930 
11931 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
11932 {
11933     if (a > b) {
11934         return a - b;
11935     } else {
11936         return 0;
11937     }
11938 }
11939 
11940 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
11941 {
11942     uint8_t res;
11943     res = a + b;
11944     if (res < a) {
11945         res = 0xff;
11946     }
11947     return res;
11948 }
11949 
11950 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
11951 {
11952     if (a > b) {
11953         return a - b;
11954     } else {
11955         return 0;
11956     }
11957 }
11958 
11959 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
11960 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
11961 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
11962 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
11963 #define PFX uq
11964 
11965 #include "op_addsub.h"
11966 
11967 /* Signed modulo arithmetic.  */
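/*
 * The modulo (wrapping) flavours also compute the CPSR.GE bits consumed
 * by the SEL instruction: a lane's GE bits are set when the signed result
 * is >= 0, or, for the unsigned flavour further below, on carry-out from
 * an addition or the absence of a borrow from a subtraction.
 */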
11968 #define SARITH16(a, b, n, op) do { \
11969     int32_t sum; \
11970     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
11971     RESULT(sum, n, 16); \
11972     if (sum >= 0) \
11973         ge |= 3 << (n * 2); \
11974     } while (0)
11975 
11976 #define SARITH8(a, b, n, op) do { \
11977     int32_t sum; \
11978     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
11979     RESULT(sum, n, 8); \
11980     if (sum >= 0) \
11981         ge |= 1 << n; \
11982     } while (0)
11983 
11984 
11985 #define ADD16(a, b, n) SARITH16(a, b, n, +)
11986 #define SUB16(a, b, n) SARITH16(a, b, n, -)
11987 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
11988 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
11989 #define PFX s
11990 #define ARITH_GE
11991 
11992 #include "op_addsub.h"
11993 
11994 /* Unsigned modulo arithmetic.  */
11995 #define ADD16(a, b, n) do { \
11996     uint32_t sum; \
11997     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
11998     RESULT(sum, n, 16); \
11999     if ((sum >> 16) == 1) \
12000         ge |= 3 << (n * 2); \
12001     } while (0)
12002 
12003 #define ADD8(a, b, n) do { \
12004     uint32_t sum; \
12005     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12006     RESULT(sum, n, 8); \
12007     if ((sum >> 8) == 1) \
12008         ge |= 1 << n; \
12009     } while (0)
12010 
12011 #define SUB16(a, b, n) do { \
12012     uint32_t sum; \
12013     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12014     RESULT(sum, n, 16); \
12015     if ((sum >> 16) == 0) \
12016         ge |= 3 << (n * 2); \
12017     } while (0)
12018 
12019 #define SUB8(a, b, n) do { \
12020     uint32_t sum; \
12021     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12022     RESULT(sum, n, 8); \
12023     if ((sum >> 8) == 0) \
12024         ge |= 1 << n; \
12025     } while (0)
12026 
12027 #define PFX u
12028 #define ARITH_GE
12029 
12030 #include "op_addsub.h"
12031 
12032 /* Halved signed arithmetic.  */
12033 #define ADD16(a, b, n) \
12034   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12035 #define SUB16(a, b, n) \
12036   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12037 #define ADD8(a, b, n) \
12038   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12039 #define SUB8(a, b, n) \
12040   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12041 #define PFX sh
12042 
12043 #include "op_addsub.h"
12044 
12045 /* Halved unsigned arithmetic.  */
12046 #define ADD16(a, b, n) \
12047   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12048 #define SUB16(a, b, n) \
12049   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12050 #define ADD8(a, b, n) \
12051   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12052 #define SUB8(a, b, n) \
12053   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12054 #define PFX uh
12055 
12056 #include "op_addsub.h"
12057 
12058 static inline uint8_t do_usad(uint8_t a, uint8_t b)
12059 {
12060     if (a > b) {
12061         return a - b;
12062     } else {
12063         return b - a;
12064     }
12065 }
12066 
12067 /* Unsigned sum of absolute byte differences.  */
12068 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12069 {
12070     uint32_t sum;
12071     sum = do_usad(a, b);
12072     sum += do_usad(a >> 8, b >> 8);
12073     sum += do_usad(a >> 16, b >> 16);
12074     sum += do_usad(a >> 24, b >> 24);
12075     return sum;
12076 }
12077 
12078 /* For ARMv6 SEL instruction.  */
12079 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12080 {
12081     uint32_t mask;
12082 
12083     mask = 0;
12084     if (flags & 1) {
12085         mask |= 0xff;
12086     }
12087     if (flags & 2) {
12088         mask |= 0xff00;
12089     }
12090     if (flags & 4) {
12091         mask |= 0xff0000;
12092     }
12093     if (flags & 8) {
12094         mask |= 0xff000000;
12095     }
12096     return (a & mask) | (b & ~mask);
12097 }
12098 
12099 /*
12100  * CRC helpers.
12101  * The upper bytes of val (above the number specified by 'bytes') must have
12102  * been zeroed out by the caller.
12103  */
12104 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12105 {
12106     uint8_t buf[4];
12107 
12108     stl_le_p(buf, val);
12109 
12110     /* zlib crc32 pre/post-inverts; the ARM insn does not, so undo both.  */
12111     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12112 }
12113 
12114 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12115 {
12116     uint8_t buf[4];
12117 
12118     stl_le_p(buf, val);
12119 
12120     /* Linux-style crc32c post-inverts its output; the ARM insn does not, so undo it.  */
12121     return crc32c(acc, buf, bytes) ^ 0xffffffff;
12122 }
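/*
 * Illustrative only: a guest CRC32W Wd, Wn, Wm corresponds to
 * helper_crc32(acc, val, 4), accumulating over the four little-endian
 * bytes of val; the B/H forms pass bytes = 1 or 2 with the upper bytes
 * of val already zeroed by the caller, as required above.
 */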
12123 
12124 /*
12125  * Return the exception level to which FP-disabled exceptions should
12126  * be taken, or 0 if FP is enabled.
12127  */
12128 int fp_exception_el(CPUARMState *env, int cur_el)
12129 {
12130 #ifndef CONFIG_USER_ONLY
12131     uint64_t hcr_el2;
12132 
12133     /*
12134      * CPACR and the CPTR registers don't exist before v6, so FP is
12135      * always accessible
12136      */
12137     if (!arm_feature(env, ARM_FEATURE_V6)) {
12138         return 0;
12139     }
12140 
12141     if (arm_feature(env, ARM_FEATURE_M)) {
12142         /* CPACR can cause a NOCP UsageFault taken to current security state */
12143         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12144             return 1;
12145         }
12146 
12147         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12148             if (!extract32(env->v7m.nsacr, 10, 1)) {
12149                 /* FP insns cause a NOCP UsageFault taken to Secure */
12150                 return 3;
12151             }
12152         }
12153 
12154         return 0;
12155     }
12156 
12157     hcr_el2 = arm_hcr_el2_eff(env);
12158 
12159     /*
12160      * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12161      * 0, 2 : trap EL0 and EL1/PL1 accesses
12162      * 1    : trap only EL0 accesses
12163      * 3    : trap no accesses
12164      * This register is ignored if E2H+TGE are both set.
12165      */
12166     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12167         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
12168 
12169         switch (fpen) {
12170         case 1:
12171             if (cur_el != 0) {
12172                 break;
12173             }
12174             /* fall through */
12175         case 0:
12176         case 2:
12177             /* Trap from Secure PL0 or PL1 to Secure PL1. */
12178             if (!arm_el_is_aa64(env, 3)
12179                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
12180                 return 3;
12181             }
12182             if (cur_el <= 1) {
12183                 return 1;
12184             }
12185             break;
12186         }
12187     }
12188 
12189     /*
12190      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12191      * to control non-secure access to the FPU. It doesn't have any
12192      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12193      */
12194     if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12195          cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12196         if (!extract32(env->cp15.nsacr, 10, 1)) {
12197             /* FP insns act as UNDEF */
12198             return cur_el == 2 ? 2 : 1;
12199         }
12200     }
12201 
12202     /*
12203      * CPTR_EL2 is present in v7VE or v8, and changes format
12204      * with HCR_EL2.E2H (regardless of TGE).
12205      */
12206     if (cur_el <= 2) {
12207         if (hcr_el2 & HCR_E2H) {
12208             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
12209             case 1:
12210                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
12211                     break;
12212                 }
12213                 /* fall through */
12214             case 0:
12215             case 2:
12216                 return 2;
12217             }
12218         } else if (arm_is_el2_enabled(env)) {
12219             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
12220                 return 2;
12221             }
12222         }
12223     }
12224 
12225     /* CPTR_EL3 : present in v8 */
12226     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
12227         /* Trap all FP ops to EL3 */
12228         return 3;
12229     }
12230 #endif
12231     return 0;
12232 }
12233 
12234 /* Return the exception level we're running at if this is our mmu_idx */
12235 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12236 {
12237     if (mmu_idx & ARM_MMU_IDX_M) {
12238         return mmu_idx & ARM_MMU_IDX_M_PRIV;
12239     }
12240 
12241     switch (mmu_idx) {
12242     case ARMMMUIdx_E10_0:
12243     case ARMMMUIdx_E20_0:
12244         return 0;
12245     case ARMMMUIdx_E10_1:
12246     case ARMMMUIdx_E10_1_PAN:
12247         return 1;
12248     case ARMMMUIdx_E2:
12249     case ARMMMUIdx_E20_2:
12250     case ARMMMUIdx_E20_2_PAN:
12251         return 2;
12252     case ARMMMUIdx_E3:
12253         return 3;
12254     default:
12255         g_assert_not_reached();
12256     }
12257 }
12258 
12259 #ifndef CONFIG_TCG
12260 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12261 {
12262     g_assert_not_reached();
12263 }
12264 #endif
12265 
12266 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12267 {
12268     ARMMMUIdx idx;
12269     uint64_t hcr;
12270 
12271     if (arm_feature(env, ARM_FEATURE_M)) {
12272         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12273     }
12274 
12275     /* See ARM pseudo-function ELIsInHost.  */
12276     switch (el) {
12277     case 0:
12278         hcr = arm_hcr_el2_eff(env);
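              /* HCR_EL2.{E2H,TGE} == '11' puts EL0 in the EL2&0 regime. */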
12279         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
12280             idx = ARMMMUIdx_E20_0;
12281         } else {
12282             idx = ARMMMUIdx_E10_0;
12283         }
12284         break;
12285     case 1:
12286         if (arm_pan_enabled(env)) {
12287             idx = ARMMMUIdx_E10_1_PAN;
12288         } else {
12289             idx = ARMMMUIdx_E10_1;
12290         }
12291         break;
12292     case 2:
12293         /* Note that TGE does not apply at EL2.  */
12294         if (arm_hcr_el2_eff(env) & HCR_E2H) {
12295             if (arm_pan_enabled(env)) {
12296                 idx = ARMMMUIdx_E20_2_PAN;
12297             } else {
12298                 idx = ARMMMUIdx_E20_2;
12299             }
12300         } else {
12301             idx = ARMMMUIdx_E2;
12302         }
12303         break;
12304     case 3:
12305         return ARMMMUIdx_E3;
12306     default:
12307         g_assert_not_reached();
12308     }
12309 
12310     return idx;
12311 }
12312 
12313 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12314 {
12315     return arm_mmu_idx_el(env, arm_current_el(env));
12316 }
12317 
12318 static bool mve_no_pred(CPUARMState *env)
12319 {
12320     /*
12321      * Return true if there is definitely no predication of MVE
12322      * instructions by VPR or LTPSIZE. (Returning false even if there
12323      * isn't any predication is OK; generated code will just be
12324      * a little worse.)
12325      * If the CPU does not implement MVE then this TB flag is always 0.
12326      *
12327      * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
12328      * logic in gen_update_fp_context() needs to be updated to match.
12329      *
12330      * We do not include the effect of the ECI bits here -- they are
12331      * tracked in other TB flags. This simplifies the logic for
12332      * "when did we emit code that changes the MVE_NO_PRED TB flag
12333      * and thus need to end the TB?".
12334      */
12335     if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
12336         return false;
12337     }
12338     if (env->v7m.vpr) {
12339         return false;
12340     }
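          /* LTPSIZE == 4 means no active loop-tail predication. */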
12341     if (env->v7m.ltpsize < 4) {
12342         return false;
12343     }
12344     return true;
12345 }
12346 
12347 void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
12348                           uint64_t *cs_base, uint32_t *pflags)
12349 {
12350     CPUARMTBFlags flags;
12351 
12352     assert_hflags_rebuild_correctly(env);
12353     flags = env->hflags;
12354 
12355     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
12356         *pc = env->pc;
12357         if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12358             DP_TBFLAG_A64(flags, BTYPE, env->btype);
12359         }
12360     } else {
12361         *pc = env->regs[15];
12362 
12363         if (arm_feature(env, ARM_FEATURE_M)) {
12364             if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12365                 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
12366                 != env->v7m.secure) {
12367                 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
12368             }
12369 
12370             if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
12371                 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
12372                  (env->v7m.secure &&
12373                   !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
12374                 /*
12375                  * ASPEN is set, but FPCA/SFPA indicate that there is no
12376                  * active FP context; we must create a new FP context before
12377                  * executing any FP insn.
12378                  */
12379                 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
12380             }
12381 
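                  /* FPCCR.S indicates the security state of the active FP context. */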
12382             bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
12383             if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
12384                 DP_TBFLAG_M32(flags, LSPACT, 1);
12385             }
12386 
12387             if (mve_no_pred(env)) {
12388                 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
12389             }
12390         } else {
12391             /*
12392              * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12393              * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12394              */
12395             if (arm_feature(env, ARM_FEATURE_XSCALE)) {
12396                 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
12397             } else {
12398                 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
12399                 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
12400             }
12401             if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
12402                 DP_TBFLAG_A32(flags, VFPEN, 1);
12403             }
12404         }
12405 
12406         DP_TBFLAG_AM32(flags, THUMB, env->thumb);
12407         DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
12408     }
12409 
12410     /*
12411      * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12412      * states defined in the ARM ARM for software singlestep:
12413      *  SS_ACTIVE   PSTATE.SS   State
12414      *     0            x       Inactive (the TB flag for SS is always 0)
12415      *     1            0       Active-pending
12416      *     1            1       Active-not-pending
12417      * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
12418      */
12419     if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
12420         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
12421     }
12422 
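          /* The two hflags words feed tb->flags and tb->cs_base respectively. */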
12423     *pflags = flags.flags;
12424     *cs_base = flags.flags2;
12425 }
12426 
12427 #ifdef TARGET_AARCH64
12428 /*
12429  * The manual says that when SVE is enabled and VQ is widened the
12430  * implementation is allowed to zero the previously inaccessible
12431  * portion of the registers.  The corollary to that is that when
12432  * SVE is enabled and VQ is narrowed we are also allowed to zero
12433  * the now inaccessible portion of the registers.
12434  *
12435  * The intent of this is that no predicate bit beyond VQ is ever set.
12436  * Which means that some operations on predicate registers themselves
12437  * may operate on full uint64_t or even unrolled across the maximum
12438  * uint64_t[4].  Performing 4 words of host arithmetic unconditionally
12439  * may well be cheaper than conditionals to restrict the operation
12440  * to the relevant portion of a uint16_t[16].
12441  */
12442 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12443 {
12444     int i, j;
12445     uint64_t pmask;
12446 
12447     assert(vq >= 1 && vq <= ARM_MAX_VQ);
12448     assert(vq <= env_archcpu(env)->sve_max_vq);
12449 
12450     /* Zap the high bits of the zregs.  */
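          /* Each vq unit is 16 bytes, i.e. two uint64_t elements of d[]. */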
12451     for (i = 0; i < 32; i++) {
12452         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12453     }
12454 
12455     /* Zap the high bits of the pregs and ffr.  */
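          /*
           * Each vq unit contributes 16 predicate bits, so four units fill
           * one uint64_t of p[].  pmask preserves the live low bits of the
           * first partially-valid word; later words are cleared outright.
           * E.g. vq == 5: j starts at 1, pmask == 0xffff, so p[1] keeps
           * bits [15:0] and p[2], p[3] are zeroed.  The loop bound of 17
           * covers the 16 predicate registers plus FFR.
           */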
12456     pmask = 0;
12457     if (vq & 3) {
12458         pmask = ~(-1ULL << (16 * (vq & 3)));
12459     }
12460     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12461         for (i = 0; i < 17; ++i) {
12462             env->vfp.pregs[i].p[j] &= pmask;
12463         }
12464         pmask = 0;
12465     }
12466 }
12467 
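      /*
       * Return the effective vector length minus 1 for @el, or 0 if the
       * relevant instructions are trapped or disabled there (the
       * "effective value of 0" from DDI0584).
       */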
12468 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
12469 {
12470     int exc_el;
12471 
12472     if (sm) {
12473         exc_el = sme_exception_el(env, el);
12474     } else {
12475         exc_el = sve_exception_el(env, el);
12476     }
12477     if (exc_el) {
12478         return 0; /* disabled */
12479     }
12480     return sve_vqm1_for_el_sm(env, el, sm);
12481 }
12482 
12483 /*
12484  * Notice a change in SVE vector size when changing EL.
12485  */
12486 void aarch64_sve_change_el(CPUARMState *env, int old_el,
12487                            int new_el, bool el0_a64)
12488 {
12489     ARMCPU *cpu = env_archcpu(env);
12490     int old_len, new_len;
12491     bool old_a64, new_a64, sm;
12492 
12493     /* Nothing to do if no SVE.  */
12494     if (!cpu_isar_feature(aa64_sve, cpu)) {
12495         return;
12496     }
12497 
12498     /* Nothing to do if FP is disabled in either EL.  */
12499     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12500         return;
12501     }
12502 
12503     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12504     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12505 
12506     /*
12507      * Both AArch64.TakeException and AArch64.ExceptionReturn
12508      * invoke ResetSVEState when taking an exception from, or
12509      * returning to, AArch32 state when PSTATE.SM is enabled.
12510      */
12511     sm = FIELD_EX64(env->svcr, SVCR, SM);
12512     if (old_a64 != new_a64 && sm) {
12513         arm_reset_sve_state(env);
12514         return;
12515     }
12516 
12517     /*
12518      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12519      * at ELx, or not available because the EL is in AArch32 state, then
12520      * for all purposes other than a direct read, the ZCR_ELx.LEN field
12521      * has an effective value of 0".
12522      *
12523      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12524      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12525      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
12526      * we already have the correct register contents when encountering the
12527      * vq0->vq0 transition between EL0->EL1.
12528      */
12529     old_len = new_len = 0;
12530     if (old_a64) {
12531         old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
12532     }
12533     if (new_a64) {
12534         new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
12535     }
12536 
12537     /* When changing vector length, clear inaccessible state.  */
12538     if (new_len < old_len) {
12539         aarch64_sve_narrow_vq(env, new_len + 1);
12540     }
12541 }
12542 #endif
12543 
12544 #ifndef CONFIG_USER_ONLY
12545 ARMSecuritySpace arm_security_space(CPUARMState *env)
12546 {
12547     if (arm_feature(env, ARM_FEATURE_M)) {
12548         return arm_secure_to_space(env->v7m.secure);
12549     }
12550 
12551     /*
12552      * If EL3 is not supported then the secure state is implementation
12553      * defined, in which case QEMU defaults to non-secure.
12554      */
12555     if (!arm_feature(env, ARM_FEATURE_EL3)) {
12556         return ARMSS_NonSecure;
12557     }
12558 
12559     /* Check for AArch64 EL3 or AArch32 Mon. */
12560     if (is_a64(env)) {
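              /* PSTATE.EL is bits [3:2]; 3 means we are at EL3. */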
12561         if (extract32(env->pstate, 2, 2) == 3) {
12562             if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
12563                 return ARMSS_Root;
12564             } else {
12565                 return ARMSS_Secure;
12566             }
12567         }
12568     } else {
12569         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
12570             return ARMSS_Secure;
12571         }
12572     }
12573 
12574     return arm_security_space_below_el3(env);
12575 }
12576 
12577 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
12578 {
12579     assert(!arm_feature(env, ARM_FEATURE_M));
12580 
12581     /*
12582      * If EL3 is not supported then the secure state is implementation
12583      * defined, in which case QEMU defaults to non-secure.
12584      */
12585     if (!arm_feature(env, ARM_FEATURE_EL3)) {
12586         return ARMSS_NonSecure;
12587     }
12588 
12589     /*
12590      * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
12591      * Ignoring NSE when !NS retains consistency without having to
12592      * modify other predicates.
12593      */
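          /* SCR_EL3.{NSE,NS}: 00 => Secure, 01 => NonSecure, 11 => Realm. */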
12594     if (!(env->cp15.scr_el3 & SCR_NS)) {
12595         return ARMSS_Secure;
12596     } else if (env->cp15.scr_el3 & SCR_NSE) {
12597         return ARMSS_Realm;
12598     } else {
12599         return ARMSS_NonSecure;
12600     }
12601 }
12602 #endif /* !CONFIG_USER_ONLY */
12603