/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"


typedef struct S1Translate {
    ARMMMUIdx in_mmu_idx;
    ARMMMUIdx in_ptw_idx;
    bool in_secure;
    bool in_debug;
    bool out_secure;
    bool out_rw;
    bool out_be;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;

static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                      target_ulong address,
                                      MMUAccessType access_type,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
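
/*
 * Worked example (illustrative): a CPU whose ID_AA64MMFR0.PARANGE reads
 * as 5 implements a 48-bit physical address range, so arm_pamax() below
 * returns 48 for it.
 */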

/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }
    /* Anything else */
    return 32;
}

/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}

static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_S:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}
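
/*
 * Worked example for the test above (illustrative values): with FWB == 0,
 * attrs == 0b0000 (Device-nGnRnE) has bits [3:2] == 0b00 and so is Device,
 * while attrs == 0b1111 (Normal, Write-Back cacheable) is not.  With
 * FWB == 1 only bit [2] matters, so any attrs with that bit set is Normal.
 */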

/* Translate an S1 pagetable walk through S2 if needed.  */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    bool is_secure = ptw->in_secure;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;
    bool pte_secure;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        if (regime_is_stage2(s2_mmu_idx)) {
            S1Translate s2ptw = {
                .in_mmu_idx = s2_mmu_idx,
                .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS,
                .in_secure = is_secure,
                .in_debug = true,
            };
            GetPhysAddrResult s2 = { };

            if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
                                    false, &s2, fi)) {
                goto fail;
            }
            ptw->out_phys = s2.f.phys_addr;
            pte_attrs = s2.cacheattrs.attrs;
            pte_secure = s2.f.attrs.secure;
        } else {
            /* Regime is physical. */
            ptw->out_phys = addr;
            pte_attrs = 0;
            pte_secure = is_secure;
        }
        ptw->out_host = NULL;
        ptw->out_rw = false;
    } else {
        CPUTLBEntryFull *full;
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
                                  arm_to_core_mmu_idx(s2_mmu_idx),
                                  true, &ptw->out_host, &full, 0);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr;
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->pte_attrs;
        pte_secure = full->attrs.secure;
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !is_secure;
            return false;
        }
    }

    /* Check if page table walk is to secure or non-secure PA space. */
    ptw->out_secure = (is_secure
                       && !(pte_secure
                            ? env->cp15.vstcr_el2 & VSTCR_SW
                            : env->cp15.vtcr_el2 & VTCR_NSW));
    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    fi->s2addr = addr;
    fi->stage2 = true;
    fi->s1ptw = true;
    fi->s1ns = !is_secure;
    return false;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint32_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
        data = qatomic_read((uint32_t *)host);
        if (ptw->out_be) {
            data = be32_to_cpu(data);
        } else {
            data = le32_to_cpu(data);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}

static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        if (ptw->out_be) {
            data = be64_to_cpu(data);
        } else {
            data = le64_to_cpu(data);
        }
#else
        if (ptw->out_be) {
            data = ldq_be_p(host);
        } else {
            data = ldq_le_p(host);
        }
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}

static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        fi->type = ARMFault_UnsuppAtomicUpdate;
        fi->s1ptw = true;
        return 0;
    }

    /*
     * Raising a stage2 Permission fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;
        void *discard;

        env->tlb_fi = fi;
        flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE,
                                   arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                   true, &discard, 0);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !ptw->in_secure;
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#ifndef TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = qemu_mutex_iothread_locked();
    if (!locked) {
        qemu_mutex_lock_iothread();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        qemu_mutex_unlock_iothread();
    }
#endif

    return cur_val;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
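
/*
 * Worked example for get_level1_table_address() (illustrative numbers):
 * with TTBCR.N == 2, mask == ~(0xffffffff >> 2) == 0xc0000000, so any VA
 * with either of its top two bits set is translated via TTBR1 (whose table
 * stays 16KB-aligned).  Otherwise base_mask == ~(0x3fff >> 2) == 0xfffff000,
 * i.e. the TTBR0 table shrinks to 4KB and must be 4KB-aligned.
 */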

/*
 * Translate section/page access permissions to page R/W protection flags
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * Translate S2 section/page access permissions to protection flags
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}

/*
 * Translate section/page access permissions to protection flags
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(!regime_is_stage2(mmu_idx));

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
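
/*
 * Worked example for get_S1prot() (illustrative): with SCTLR.WXN set and a
 * descriptor whose simple AP makes the page writable (prot_rw includes
 * PAGE_WRITE), the final test above never ORs in PAGE_EXEC, so no page can
 * be both writable and executable in that regime.
 */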

static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
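        /*
         * Worked example (illustrative): with t1sz == 1, TTBR1 is selected
         * for va >= ~(0xffffffff >> 1) == 0x80000000, i.e. the top half of
         * the 32-bit address space; with t1sz == 0, TTBR1 covers everything
         * above the TTBR0 span defined by t0sz.
         */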
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @level:      Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 * @outputsize: Bitsize of PAs
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1.  Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
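
/*
 * Worked example for the startsizecheck above (illustrative): with a 4KB
 * granule (stride == 9, grainsize == 12), inputsize == 40 and a suggested
 * starting level of 1, startsizecheck == 40 - ((3 - 1) * 9 + 12) == 10,
 * which lies within [1, stride + 4], so the SL0 value is accepted.
 */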

/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, @result may
 * not be filled in, and @fi provides information on why the translation
 * aborted, in the format of a long-format DFSR/IFSR fault register, with
 * the following caveat: the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
 *             (so this is a stage 2 page table walk),
 *             must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @ptw->in_mmu_idx is anything
 *             else, @s1_is_el0 is ignored.
 * @result: set on translation success
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_secure = ptw->in_secure;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint64_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    uint64_t descriptor, new_descriptor;
    bool nstable;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            goto do_translation_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            goto do_translation_fault;
        }
    }

    stride = arm_granule_bits(param.gran) - 3;

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_translation_fault;
    }

    if (!regime_is_stage2(mmu_idx)) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
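        /*
         * Worked example (illustrative): for a 4KB granule (stride == 9)
         * and inputsize == 40 (T0SZ == 24), level = 4 - 36 / 9 == 0;
         * for inputsize == 32, level = 4 - 28 / 9 == 1, matching the
         * architectural starting levels for those configurations.
         */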
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr, 6, 2);
        uint32_t sl2 = extract64(tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                goto do_translation_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            goto do_translation_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;
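
    /*
     * Worked example (illustrative): on a v8 CPU with a 4KB granule
     * (stride == 9, so indexmask_grainsize covers bits [11:0]) and no
     * FEAT_LPA2, descaddrmask == MAKE_64BIT_MASK(0, 48) & ~0xfff, i.e.
     * bits [47:12], which selects the next-level table or output address
     * field of each descriptor.
     */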

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = is_secure ? 0 : (1 << 4);

 next_level:
    descaddr |= (address >> (stride * (4 - level))) & indexmask;
    descaddr &= ~7ULL;
    nstable = extract32(tableattrs, 4, 1);
    if (!nstable) {
        /*
         * Stage2_S -> Stage2 or Phys_S -> Phys_NS
         * Assert that the non-secure idx are even, and relative order.
         */
        QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
        QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
        ptw->in_ptw_idx &= ~1;
        ptw->in_secure = false;
    }
    if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
        goto do_fault;
    }
    descriptor = arm_ldq_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    new_descriptor = descriptor;

 restart_atomic_update:
    if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
        /* Invalid, or the Reserved level 3 encoding */
        goto do_translation_fault;
    }

    descaddr = descriptor & descaddrmask;

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
     * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
     * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
     * raise AddressSizeFault.
     */
    if (outputsize > 48) {
        if (param.ds) {
            descaddr |= extract64(descriptor, 8, 2) << 50;
        } else {
            descaddr |= extract64(descriptor, 12, 4) << 48;
        }
    } else if (descaddr >> outputsize) {
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    if ((descriptor & 2) && (level < 3)) {
        /*
         * Table entry. The top five bits are attributes which may
         * propagate down through lower levels of the table (and
         * which are all arranged so that 0 means "no effect", so
         * we can gather them up by ORing in the bits at each level).
         */
        tableattrs |= extract64(descriptor, 59, 5);
        level++;
        indexmask = indexmask_grainsize;
        goto next_level;
    }

    /*
     * Block entry at level 1 or 2, or page entry at level 3.
     * These are basically the same thing, although the number
     * of bits we pull in from the vaddr varies. Note that although
     * descaddrmask masks enough of the low bits of the descriptor
     * to give a correct page or table address, the address field
     * in a block descriptor is smaller; so we need to explicitly
     * clear the lower bits here before ORing in the low vaddr bits.
     *
     * Afterward, descaddr is the final physical address.
     */
    page_size = (1ULL << ((stride * (4 - level)) + 3));
    descaddr &= ~(hwaddr)(page_size - 1);
    descaddr |= (address & (page_size - 1));
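
    /*
     * Worked example (illustrative): with a 4KB granule (stride == 9), a
     * level 3 page entry gives page_size == 1 << 12 (4KB), while a level 2
     * block entry gives page_size == 1 << 21 (2MB), so the low 21 bits of
     * the VA are carried through into the physical address.
     */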

    if (likely(!ptw->in_debug)) {
        /*
         * Access flag.
         * If HA is enabled, prepare to update the descriptor below.
         * Otherwise, pass the access fault on to software.
         */
        if (!(descriptor & (1 << 10))) {
            if (param.ha) {
                new_descriptor |= 1 << 10; /* AF */
            } else {
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
        }

        /*
         * Dirty Bit.
         * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
         * bit for writeback. The actual write protection test may still be
         * overridden by tableattrs, to be merged below.
         */
        if (param.hd
            && extract64(descriptor, 51, 1)  /* DBM */
            && access_type == MMU_DATA_STORE) {
            if (regime_is_stage2(mmu_idx)) {
                new_descriptor |= 1ull << 7;    /* set S2AP[1] */
            } else {
                new_descriptor &= ~(1ull << 7); /* clear AP[2] */
            }
        }
    }

    /*
     * Extract attributes from the (modified) descriptor, and apply
     * table descriptors. Stage 2 table descriptors do not include
     * any attribute fields. HPD disables all the table attributes
     * except NSTable.
     */
    attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
    if (!regime_is_stage2(mmu_idx)) {
        attrs |= nstable << 5; /* NS */
        if (!param.hpd) {
            attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
            /*
             * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
             * means "force PL1 access only", which means forcing AP[1] to 0.
             */
            attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
            attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
        }
    }
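    /*
     * Worked example (illustrative): if any table descriptor on the walk
     * had APTable[0] set, the AND above clears AP[1] in the merged
     * attributes, removing EL0 access from the leaf page regardless of its
     * own AP bits; APTable[1] likewise forces AP[2] (read-only).
     */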
1485 
1486     ap = extract32(attrs, 6, 2);
1487     if (regime_is_stage2(mmu_idx)) {
1488         ns = mmu_idx == ARMMMUIdx_Stage2;
1489         xn = extract64(attrs, 53, 2);
1490         result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
1491     } else {
1492         ns = extract32(attrs, 5, 1);
1493         xn = extract64(attrs, 54, 1);
1494         pxn = extract64(attrs, 53, 1);
1495         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1496     }
1497 
1498     if (!(result->f.prot & (1 << access_type))) {
1499         fi->type = ARMFault_Permission;
1500         goto do_fault;
1501     }
1502 
1503     /* If FEAT_HAFDBS has made changes, update the PTE. */
1504     if (new_descriptor != descriptor) {
1505         new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
1506         if (fi->type != ARMFault_None) {
1507             goto do_fault;
1508         }
1509         /*
1510          * I_YZSVV says that if the in-memory descriptor has changed,
1511          * then we must use the information in that new value
1512          * (which might include a different output address, different
1513          * attributes, or generate a fault).
1514          * Restart the handling of the descriptor value from scratch.
1515          */
1516         if (new_descriptor != descriptor) {
1517             descriptor = new_descriptor;
1518             goto restart_atomic_update;
1519         }
1520     }
1521 
1522     if (ns) {
1523         /*
1524          * The NS bit will (as required by the architecture) have no effect if
1525          * the CPU doesn't support TZ or this is a non-secure translation
1526          * regime, because the attribute will already be non-secure.
1527          */
1528         result->f.attrs.secure = false;
1529     }
1530 
1531     /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB.  */
1532     if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
1533         result->f.guarded = extract64(attrs, 50, 1); /* GP */
1534     }
1535 
1536     if (regime_is_stage2(mmu_idx)) {
1537         result->cacheattrs.is_s2_format = true;
1538         result->cacheattrs.attrs = extract32(attrs, 2, 4);
1539     } else {
1540         /* Index into MAIR registers for cache attributes */
1541         uint8_t attrindx = extract32(attrs, 2, 3);
1542         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1543         assert(attrindx <= 7);
1544         result->cacheattrs.is_s2_format = false;
1545         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1546     }
1547 
1548     /*
1549      * For FEAT_LPA2 and effective DS, the SH field in the attributes
1550      * was re-purposed for output address bits.  The SH attribute in
1551      * that case comes from TCR_ELx, which we extracted earlier.
1552      */
1553     if (param.ds) {
1554         result->cacheattrs.shareability = param.sh;
1555     } else {
1556         result->cacheattrs.shareability = extract32(attrs, 8, 2);
1557     }
1558 
1559     result->f.phys_addr = descaddr;
1560     result->f.lg_page_size = ctz64(page_size);
1561     return false;
1562 
1563  do_translation_fault:
1564     fi->type = ARMFault_Translation;
1565  do_fault:
1566     fi->level = level;
1567     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1568     /* Tag the fault as S2: a failed S1 PTW at S2, or an ordinary S2 fault. */
1569     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1570     return true;
1571 }
1572 
1573 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1574                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1575                                  bool is_secure, GetPhysAddrResult *result,
1576                                  ARMMMUFaultInfo *fi)
1577 {
1578     int n;
1579     uint32_t mask;
1580     uint32_t base;
1581     bool is_user = regime_is_user(env, mmu_idx);
1582 
1583     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
1584         /* MPU disabled.  */
1585         result->f.phys_addr = address;
1586         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1587         return false;
1588     }
1589 
1590     result->f.phys_addr = address;
1591     for (n = 7; n >= 0; n--) {
1592         base = env->cp15.c6_region[n];
1593         if ((base & 1) == 0) {
1594             continue;
1595         }
1596         mask = 1 << ((base >> 1) & 0x1f);
1597             /*
1598              * Keep this shift separate from the above to avoid an
              * (undefined) << 32.
              */
1599         mask = (mask << 1) - 1;
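        /*
         * Worked example (illustrative): a size field of 31 selects a
         * 4GB region; the first step gives mask = 1 << 31 = 0x80000000
         * and the second wraps to mask = 0xffffffff, without ever
         * evaluating the undefined expression 1 << 32.
         */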
1600         if (((base ^ address) & ~mask) == 0) {
1601             break;
1602         }
1603     }
1604     if (n < 0) {
1605         fi->type = ARMFault_Background;
1606         return true;
1607     }
1608 
1609     if (access_type == MMU_INST_FETCH) {
1610         mask = env->cp15.pmsav5_insn_ap;
1611     } else {
1612         mask = env->cp15.pmsav5_data_ap;
1613     }
1614     mask = (mask >> (n * 4)) & 0xf;
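    /*
     * Informal summary of the PMSAv5 AP cases handled below:
     * 0 = no access, 1 = privileged RW only, 2 = privileged RW with
     * user RO, 3 = RW for all, 5 = privileged RO only, 6 = RO for
     * all; other values are reserved and fault.
     */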
1615     switch (mask) {
1616     case 0:
1617         fi->type = ARMFault_Permission;
1618         fi->level = 1;
1619         return true;
1620     case 1:
1621         if (is_user) {
1622             fi->type = ARMFault_Permission;
1623             fi->level = 1;
1624             return true;
1625         }
1626         result->f.prot = PAGE_READ | PAGE_WRITE;
1627         break;
1628     case 2:
1629         result->f.prot = PAGE_READ;
1630         if (!is_user) {
1631             result->f.prot |= PAGE_WRITE;
1632         }
1633         break;
1634     case 3:
1635         result->f.prot = PAGE_READ | PAGE_WRITE;
1636         break;
1637     case 5:
1638         if (is_user) {
1639             fi->type = ARMFault_Permission;
1640             fi->level = 1;
1641             return true;
1642         }
1643         result->f.prot = PAGE_READ;
1644         break;
1645     case 6:
1646         result->f.prot = PAGE_READ;
1647         break;
1648     default:
1649         /* Bad permission.  */
1650         fi->type = ARMFault_Permission;
1651         fi->level = 1;
1652         return true;
1653     }
1654     result->f.prot |= PAGE_EXEC;
1655     return false;
1656 }
1657 
1658 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1659                                          int32_t address, uint8_t *prot)
1660 {
1661     if (!arm_feature(env, ARM_FEATURE_M)) {
1662         *prot = PAGE_READ | PAGE_WRITE;
1663         switch (address) {
1664         case 0xF0000000 ... 0xFFFFFFFF:
1665             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1666                 /* executing from hivecs (high vectors) is OK */
1667                 *prot |= PAGE_EXEC;
1668             }
1669             break;
1670         case 0x00000000 ... 0x7FFFFFFF:
1671             *prot |= PAGE_EXEC;
1672             break;
1673         }
1674     } else {
1675         /*
1676          * Default system address map for M profile cores.
1677          * The architecture specifies which regions are execute-never;
1678          * at the MPU level no other checks are defined.
          */
1679         switch (address) {
1680         case 0x00000000 ... 0x1fffffff: /* ROM */
1681         case 0x20000000 ... 0x3fffffff: /* SRAM */
1682         case 0x60000000 ... 0x7fffffff: /* RAM */
1683         case 0x80000000 ... 0x9fffffff: /* RAM */
1684             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1685             break;
1686         case 0x40000000 ... 0x5fffffff: /* Peripheral */
1687         case 0xa0000000 ... 0xbfffffff: /* Device */
1688         case 0xc0000000 ... 0xdfffffff: /* Device */
1689         case 0xe0000000 ... 0xffffffff: /* System */
1690             *prot = PAGE_READ | PAGE_WRITE;
1691             break;
1692         default:
1693             g_assert_not_reached();
1694         }
1695     }
1696 }
1697 
1698 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1699 {
1700     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1701     return arm_feature(env, ARM_FEATURE_M) &&
1702         extract32(address, 20, 12) == 0xe00;
1703 }
1704 
1705 static bool m_is_system_region(CPUARMState *env, uint32_t address)
1706 {
1707     /*
1708      * True if address is in the M profile system region
1709      * 0xe0000000 - 0xffffffff
1710      */
1711     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1712 }
1713 
1714 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1715                                          bool is_secure, bool is_user)
1716 {
1717     /*
1718      * Return true if we should use the default memory map as a
1719      * "background" region if there are no hits against any MPU regions.
1720      */
1721     CPUARMState *env = &cpu->env;
1722 
1723     if (is_user) {
1724         return false;
1725     }
1726 
1727     if (arm_feature(env, ARM_FEATURE_M)) {
1728         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1729     } else {
1730         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1731     }
1732 }
1733 
1734 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1735                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1736                                  bool secure, GetPhysAddrResult *result,
1737                                  ARMMMUFaultInfo *fi)
1738 {
1739     ARMCPU *cpu = env_archcpu(env);
1740     int n;
1741     bool is_user = regime_is_user(env, mmu_idx);
1742 
1743     result->f.phys_addr = address;
1744     result->f.lg_page_size = TARGET_PAGE_BITS;
1745     result->f.prot = 0;
1746 
1747     if (regime_translation_disabled(env, mmu_idx, secure) ||
1748         m_is_ppb_region(env, address)) {
1749         /*
1750          * MPU disabled or M profile PPB access: use default memory map.
1751          * The other case which uses the default memory map in the
1752          * v7M ARM ARM pseudocode is exception vector reads from the vector
1753          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1754          * which always does a direct read using address_space_ldl(), rather
1755          * than going via this function, so we don't need to check that here.
1756          */
1757         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1758     } else { /* MPU enabled */
1759         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1760             /* region search */
1761             uint32_t base = env->pmsav7.drbar[n];
1762             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1763             uint32_t rmask;
1764             bool srdis = false;
1765 
1766             if (!(env->pmsav7.drsr[n] & 0x1)) {
1767                 continue;
1768             }
1769 
1770             if (!rsize) {
1771                 qemu_log_mask(LOG_GUEST_ERROR,
1772                               "DRSR[%d]: Rsize field cannot be 0\n", n);
1773                 continue;
1774             }
1775             rsize++;
1776             rmask = (1ull << rsize) - 1;
1777 
1778             if (base & rmask) {
1779                 qemu_log_mask(LOG_GUEST_ERROR,
1780                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1781                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
1782                               n, base, rmask);
1783                 continue;
1784             }
1785 
1786             if (address < base || address > base + rmask) {
1787                 /*
1788                  * Address not in this region. We must check whether the
1789                  * region covers addresses in the same page as our address.
1790                  * In that case we must not report a size that covers the
1791                  * whole page for a subsequent hit against a different MPU
1792                  * region or the background region, because it would result in
1793                  * incorrect TLB hits for subsequent accesses to addresses that
1794                  * are in this MPU region.
1795                  */
1796                 if (ranges_overlap(base, rmask,
1797                                    address & TARGET_PAGE_MASK,
1798                                    TARGET_PAGE_SIZE)) {
1799                     result->f.lg_page_size = 0;
1800                 }
1801                 continue;
1802             }
1803 
1804             /* Region matched */
1805 
1806             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1807                 int i, snd;
1808                 uint32_t srdis_mask;
1809 
1810                 rsize -= 3; /* sub region size (power of 2) */
1811                 snd = ((address - base) >> rsize) & 0x7;
1812                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1813 
1814                 srdis_mask = srdis ? 0x3 : 0x0;
1815                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1816                     /*
1817                      * This will check in groups of 2, 4 and then 8, whether
1818                      * the subregion bits are consistent. rsize is incremented
1819                      * back up to give the region size, considering consistent
1820                      * adjacent subregions as one region. Stop testing if rsize
1821                      * is already big enough for an entire QEMU page.
1822                      */
1823                     int snd_rounded = snd & ~(i - 1);
1824                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1825                                                      snd_rounded + 8, i);
1826                     if (srdis_mask ^ srdis_multi) {
1827                         break;
1828                     }
1829                     srdis_mask = (srdis_mask << i) | srdis_mask;
1830                     rsize++;
1831                 }
1832             }
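            /*
             * Worked example (illustrative): with SRD = 0x0f (lower
             * four subregions disabled) and an address in subregion 6,
             * the groups of 2 and 4 subregion bits around it are
             * consistently enabled, so rsize is bumped twice; the
             * group of 8 then mismatches and the loop stops, leaving
             * an effective region four subregions wide.
             */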
1833             if (srdis) {
1834                 continue;
1835             }
1836             if (rsize < TARGET_PAGE_BITS) {
1837                 result->f.lg_page_size = rsize;
1838             }
1839             break;
1840         }
1841 
1842         if (n == -1) { /* no hits */
1843             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1844                 /* background fault */
1845                 fi->type = ARMFault_Background;
1846                 return true;
1847             }
1848             get_phys_addr_pmsav7_default(env, mmu_idx, address,
1849                                          &result->f.prot);
1850         } else { /* an MPU hit! */
1851             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1852             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1853 
1854             if (m_is_system_region(env, address)) {
1855                 /* System space is always execute never */
1856                 xn = 1;
1857             }
1858 
1859             if (is_user) { /* User mode AP bit decoding */
1860                 switch (ap) {
1861                 case 0:
1862                 case 1:
1863                 case 5:
1864                     break; /* no access */
1865                 case 3:
1866                     result->f.prot |= PAGE_WRITE;
1867                     /* fall through */
1868                 case 2:
1869                 case 6:
1870                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1871                     break;
1872                 case 7:
1873                     /* for v7M, same as 6; for R profile a reserved value */
1874                     if (arm_feature(env, ARM_FEATURE_M)) {
1875                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1876                         break;
1877                     }
1878                     /* fall through */
1879                 default:
1880                     qemu_log_mask(LOG_GUEST_ERROR,
1881                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1882                                   PRIx32 "\n", n, ap);
1883                 }
1884             } else { /* Priv. mode AP bits decoding */
1885                 switch (ap) {
1886                 case 0:
1887                     break; /* no access */
1888                 case 1:
1889                 case 2:
1890                 case 3:
1891                     result->f.prot |= PAGE_WRITE;
1892                     /* fall through */
1893                 case 5:
1894                 case 6:
1895                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1896                     break;
1897                 case 7:
1898                     /* for v7M, same as 6; for R profile a reserved value */
1899                     if (arm_feature(env, ARM_FEATURE_M)) {
1900                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1901                         break;
1902                     }
1903                     /* fall through */
1904                 default:
1905                     qemu_log_mask(LOG_GUEST_ERROR,
1906                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1907                                   PRIx32 "\n", n, ap);
1908                 }
1909             }
1910 
1911             /* execute never */
1912             if (xn) {
1913                 result->f.prot &= ~PAGE_EXEC;
1914             }
1915         }
1916     }
1917 
1918     fi->type = ARMFault_Permission;
1919     fi->level = 1;
1920     return !(result->f.prot & (1 << access_type));
1921 }
1922 
1923 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1924                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1925                        bool secure, GetPhysAddrResult *result,
1926                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1927 {
1928     /*
1929      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1930      * that a full virt-to-phys translation does).
1931      * mregion is (if not NULL) set to the region number which matched,
1932      * or -1 if no region number is returned (MPU off, address did not
1933      * hit a region, or address hit in multiple regions).
1934      * If the region hit doesn't cover the entire TARGET_PAGE the address
1935      * is within, then we set the result lg_page_size to 0 to force the
1936      * memory system to use a subpage.
1937      */
1938     ARMCPU *cpu = env_archcpu(env);
1939     bool is_user = regime_is_user(env, mmu_idx);
1940     int n;
1941     int matchregion = -1;
1942     bool hit = false;
1943     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1944     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1945 
1946     result->f.lg_page_size = TARGET_PAGE_BITS;
1947     result->f.phys_addr = address;
1948     result->f.prot = 0;
1949     if (mregion) {
1950         *mregion = -1;
1951     }
1952 
1953     /*
1954      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1955      * was an exception vector read from the vector table (which is always
1956      * done using the default system address map), because those accesses
1957      * are done in arm_v7m_load_vector(), which always does a direct
1958      * read using address_space_ldl(), rather than going via this function.
1959      */
1960     if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
1961         hit = true;
1962     } else if (m_is_ppb_region(env, address)) {
1963         hit = true;
1964     } else {
1965         if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1966             hit = true;
1967         }
1968 
1969         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1970             /* region search */
1971             /*
1972              * Note that the base address is bits [31:5] from the register
1973              * with bits [4:0] all zeroes, but the limit address is bits
1974              * [31:5] from the register with bits [4:0] all ones.
1975              */
1976             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1977             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
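            /*
             * Illustrative values: RBAR = 0x20000041 gives
             * base = 0x20000040, while RLAR = 0x2000ffe1 gives
             * limit = 0x2000ffff.
             */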
1978 
1979             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1980                 /* Region disabled */
1981                 continue;
1982             }
1983 
1984             if (address < base || address > limit) {
1985                 /*
1986                  * Address not in this region. We must check whether the
1987                  * region covers addresses in the same page as our address.
1988                  * In that case we must not report a size that covers the
1989                  * whole page for a subsequent hit against a different MPU
1990                  * region or the background region, because it would result in
1991                  * incorrect TLB hits for subsequent accesses to addresses that
1992                  * are in this MPU region.
1993                  */
1994                 if (limit >= base &&
1995                     ranges_overlap(base, limit - base + 1,
1996                                    addr_page_base,
1997                                    TARGET_PAGE_SIZE)) {
1998                     result->f.lg_page_size = 0;
1999                 }
2000                 continue;
2001             }
2002 
2003             if (base > addr_page_base || limit < addr_page_limit) {
2004                 result->f.lg_page_size = 0;
2005             }
2006 
2007             if (matchregion != -1) {
2008                 /*
2009                  * Multiple regions match -- always a failure (unlike
2010                  * PMSAv7 where highest-numbered-region wins)
2011                  */
2012                 fi->type = ARMFault_Permission;
2013                 fi->level = 1;
2014                 return true;
2015             }
2016 
2017             matchregion = n;
2018             hit = true;
2019         }
2020     }
2021 
2022     if (!hit) {
2023         /* background fault */
2024         fi->type = ARMFault_Background;
2025         return true;
2026     }
2027 
2028     if (matchregion == -1) {
2029         /* hit using the background region */
2030         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2031     } else {
2032         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
2033         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
2034         bool pxn = false;
2035 
2036         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
2037             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
2038         }
2039 
2040         if (m_is_system_region(env, address)) {
2041             /* System space is always execute never */
2042             xn = 1;
2043         }
2044 
2045         result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
2046         if (result->f.prot && !xn && !(pxn && !is_user)) {
2047             result->f.prot |= PAGE_EXEC;
2048         }
2049         /*
2050          * We don't need to look the attribute up in the MAIR0/MAIR1
2051          * registers because that only tells us about cacheability.
2052          */
2053         if (mregion) {
2054             *mregion = matchregion;
2055         }
2056     }
2057 
2058     fi->type = ARMFault_Permission;
2059     fi->level = 1;
2060     return !(result->f.prot & (1 << access_type));
2061 }
2062 
2063 static bool v8m_is_sau_exempt(CPUARMState *env,
2064                               uint32_t address, MMUAccessType access_type)
2065 {
2066     /*
2067      * The architecture specifies that certain address ranges are
2068      * exempt from v8M SAU/IDAU checks.
2069      */
2070     return
2071         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
2072         (address >= 0xe0000000 && address <= 0xe0002fff) ||
2073         (address >= 0xe000e000 && address <= 0xe000efff) ||
2074         (address >= 0xe002e000 && address <= 0xe002efff) ||
2075         (address >= 0xe0040000 && address <= 0xe0041fff) ||
2076         (address >= 0xe00ff000 && address <= 0xe00fffff);
2077 }
2078 
2079 void v8m_security_lookup(CPUARMState *env, uint32_t address,
2080                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
2081                          bool is_secure, V8M_SAttributes *sattrs)
2082 {
2083     /*
2084      * Look up the security attributes for this address. Compare the
2085      * pseudocode SecurityCheck() function.
2086      * We assume the caller has zero-initialized *sattrs.
2087      */
2088     ARMCPU *cpu = env_archcpu(env);
2089     int r;
2090     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
2091     int idau_region = IREGION_NOTVALID;
2092     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2093     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2094 
2095     if (cpu->idau) {
2096         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
2097         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
2098 
2099         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
2100                    &idau_nsc);
2101     }
2102 
2103     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
2104         /* 0xf0000000..0xffffffff is always S for insn fetches */
2105         return;
2106     }
2107 
2108     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
2109         sattrs->ns = !is_secure;
2110         return;
2111     }
2112 
2113     if (idau_region != IREGION_NOTVALID) {
2114         sattrs->irvalid = true;
2115         sattrs->iregion = idau_region;
2116     }
2117 
2118     switch (env->sau.ctrl & 3) {
2119     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
2120         break;
2121     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
2122         sattrs->ns = true;
2123         break;
2124     default: /* SAU.ENABLE == 1 */
2125         for (r = 0; r < cpu->sau_sregion; r++) {
2126             if (env->sau.rlar[r] & 1) {
2127                 uint32_t base = env->sau.rbar[r] & ~0x1f;
2128                 uint32_t limit = env->sau.rlar[r] | 0x1f;
2129 
2130                 if (base <= address && limit >= address) {
2131                     if (base > addr_page_base || limit < addr_page_limit) {
2132                         sattrs->subpage = true;
2133                     }
2134                     if (sattrs->srvalid) {
2135                         /*
2136                          * If we hit in more than one region then we must report
2137                          * as Secure, not NS-Callable, with no valid region
2138                          * number info.
2139                          */
2140                         sattrs->ns = false;
2141                         sattrs->nsc = false;
2142                         sattrs->sregion = 0;
2143                         sattrs->srvalid = false;
2144                         break;
2145                     } else {
2146                         if (env->sau.rlar[r] & 2) {
2147                             sattrs->nsc = true;
2148                         } else {
2149                             sattrs->ns = true;
2150                         }
2151                         sattrs->srvalid = true;
2152                         sattrs->sregion = r;
2153                     }
2154                 } else {
2155                     /*
2156                      * Address not in this region. We must check whether the
2157                      * region covers addresses in the same page as our address.
2158                      * In that case we must not report a size that covers the
2159                      * whole page for a subsequent hit against a different MPU
2160                      * region or the background region, because it would result
2161                      * in incorrect TLB hits for subsequent accesses to
2162                      * addresses that are in this MPU region.
2163                      */
2164                     if (limit >= base &&
2165                         ranges_overlap(base, limit - base + 1,
2166                                        addr_page_base,
2167                                        TARGET_PAGE_SIZE)) {
2168                         sattrs->subpage = true;
2169                     }
2170                 }
2171             }
2172         }
2173         break;
2174     }
2175 
2176     /*
2177      * The IDAU will override the SAU lookup results if it specifies
2178      * higher security than the SAU does.
2179      */
2180     if (!idau_ns) {
2181         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2182             sattrs->ns = false;
2183             sattrs->nsc = idau_nsc;
2184         }
2185     }
2186 }
2187 
2188 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
2189                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
2190                                  bool secure, GetPhysAddrResult *result,
2191                                  ARMMMUFaultInfo *fi)
2192 {
2193     V8M_SAttributes sattrs = {};
2194     bool ret;
2195 
2196     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2197         v8m_security_lookup(env, address, access_type, mmu_idx,
2198                             secure, &sattrs);
2199         if (access_type == MMU_INST_FETCH) {
2200             /*
2201              * Instruction fetches always use the MMU bank and the
2202              * transaction attribute determined by the fetch address,
2203              * regardless of CPU state. This is painful for QEMU
2204              * to handle, because it would mean we need to encode
2205              * into the mmu_idx not just the (user, negpri) information
2206              * for the current security state but also that for the
2207              * other security state, which would balloon the number
2208              * of mmu_idx values needed alarmingly.
2209              * Fortunately we can avoid this because it's not actually
2210              * possible to arbitrarily execute code from memory with
2211              * the wrong security attribute: it will always generate
2212              * an exception of some kind or another, apart from the
2213              * special case of an NS CPU executing an SG instruction
2214              * in S&NSC memory. So we always just fail the translation
2215              * here and sort things out in the exception handler
2216              * (including possibly emulating an SG instruction).
2217              */
2218             if (sattrs.ns != !secure) {
2219                 if (sattrs.nsc) {
2220                     fi->type = ARMFault_QEMU_NSCExec;
2221                 } else {
2222                     fi->type = ARMFault_QEMU_SFault;
2223                 }
2224                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2225                 result->f.phys_addr = address;
2226                 result->f.prot = 0;
2227                 return true;
2228             }
2229         } else {
2230             /*
2231              * For data accesses we always use the MMU bank indicated
2232              * by the current CPU state, but the security attributes
2233              * might downgrade a secure access to nonsecure.
2234              */
2235             if (sattrs.ns) {
2236                 result->f.attrs.secure = false;
2237             } else if (!secure) {
2238                 /*
2239                  * NS access to S memory must fault.
2240                  * Architecturally we should first check whether the
2241                  * MPU information for this address indicates that we
2242                  * are doing an unaligned access to Device memory, which
2243                  * should generate a UsageFault instead. QEMU does not
2244                  * currently check for that kind of unaligned access though.
2245                  * If we added it we would need to do so as a special case
2246                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2247                  */
2248                 fi->type = ARMFault_QEMU_SFault;
2249                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2250                 result->f.phys_addr = address;
2251                 result->f.prot = 0;
2252                 return true;
2253             }
2254         }
2255     }
2256 
2257     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2258                             result, fi, NULL);
2259     if (sattrs.subpage) {
2260         result->f.lg_page_size = 0;
2261     }
2262     return ret;
2263 }
2264 
2265 /*
2266  * Translate from the 4-bit stage 2 representation of
2267  * memory attributes (without cache-allocation hints) to
2268  * the 8-bit representation of the stage 1 MAIR registers
2269  * (which includes allocation hints).
2270  *
2271  * ref: shared/translation/attrs/S2AttrDecode()
2272  *      .../S2ConvertAttrsHints()
2273  */
2274 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2275 {
2276     uint8_t hiattr = extract32(s2attrs, 2, 2);
2277     uint8_t loattr = extract32(s2attrs, 0, 2);
2278     uint8_t hihint = 0, lohint = 0;
2279 
2280     if (hiattr != 0) { /* normal memory */
2281         if (hcr & HCR_CD) { /* cache disabled */
2282             hiattr = loattr = 1; /* non-cacheable */
2283         } else {
2284             if (hiattr != 1) { /* Write-through or write-back */
2285                 hihint = 3; /* RW allocate */
2286             }
2287             if (loattr != 1) { /* Write-through or write-back */
2288                 lohint = 3; /* RW allocate */
2289             }
2290         }
2291     }
2292 
2293     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2294 }
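
/*
 * Example (illustrative): s2attrs = 0xf (inner and outer write-back)
 * with HCR.CD clear converts to MAIR format 0xff, i.e. write-back with
 * RW-allocate hints in both nibbles; with HCR.CD set the result would
 * instead be 0x44 (inner and outer non-cacheable).
 */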
2295 
2296 /*
2297  * Combine either inner or outer cacheability attributes for normal
2298  * memory, according to table D4-42 and pseudocode procedure
2299  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2300  *
2301  * NB: only stage 1 includes allocation hints (RW bits), leading to
2302  * some asymmetry.
2303  */
2304 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2305 {
2306     if (s1 == 4 || s2 == 4) {
2307         /* non-cacheable has precedence */
2308         return 4;
2309     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2310         /* stage 1 write-through takes precedence */
2311         return s1;
2312     } else if (extract32(s2, 2, 2) == 2) {
2313         /*
2314          * Stage 2 write-through takes precedence, but the allocation
2315          * hint is still taken from stage 1.
         */
2316         return (2 << 2) | extract32(s1, 0, 2);
2317     } else { /* write-back */
2318         return s1;
2319     }
2320 }
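
/*
 * Example (illustrative): combining a stage 1 nibble of 0xf
 * (write-back, RW-allocate) with a stage 2 nibble of 0xa
 * (write-through) yields 0xb: write-through wins, but the
 * RW-allocate hint still comes from stage 1.
 */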
2321 
2322 /*
2323  * Combine the memory type and cacheability attributes of
2324  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2325  * combined attributes in MAIR_EL1 format.
2326  */
2327 static uint8_t combined_attrs_nofwb(uint64_t hcr,
2328                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2329 {
2330     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2331 
2332     s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2333 
2334     s1lo = extract32(s1.attrs, 0, 4);
2335     s2lo = extract32(s2_mair_attrs, 0, 4);
2336     s1hi = extract32(s1.attrs, 4, 4);
2337     s2hi = extract32(s2_mair_attrs, 4, 4);
2338 
2339     /* Combine memory type and cacheability attributes */
2340     if (s1hi == 0 || s2hi == 0) {
2341         /* Device has precedence over normal */
2342         if (s1lo == 0 || s2lo == 0) {
2343             /* nGnRnE has precedence over anything */
2344             ret_attrs = 0;
2345         } else if (s1lo == 4 || s2lo == 4) {
2346             /* non-Reordering has precedence over Reordering */
2347             ret_attrs = 4;  /* nGnRE */
2348         } else if (s1lo == 8 || s2lo == 8) {
2349             /* non-Gathering has precedence over Gathering */
2350             ret_attrs = 8;  /* nGRE */
2351         } else {
2352             ret_attrs = 0xc; /* GRE */
2353         }
2354     } else { /* Normal memory */
2355         /* Outer/inner cacheability combine independently */
2356         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2357                   | combine_cacheattr_nibble(s1lo, s2lo);
2358     }
2359     return ret_attrs;
2360 }
2361 
2362 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2363 {
2364     /*
2365      * Given the 4 bits specifying the outer or inner cacheability
2366      * in MAIR format, return a value specifying Normal Write-Back,
2367      * with the allocation and transient hints taken from the input
2368      * if the input specified some kind of cacheable attribute.
2369      */
2370     if (attr == 0 || attr == 4) {
2371         /*
2372          * 0 == an UNPREDICTABLE encoding
2373          * 4 == Non-cacheable
2374          * Either way, force Write-Back RW allocate non-transient
2375          */
2376         return 0xf;
2377     }
2378     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2379     return attr | 4;
2380 }
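
/*
 * Example (illustrative): 0x8 (write-through, no allocation hints)
 * becomes 0xc (write-back, same hints), while 4 (non-cacheable) is
 * forced all the way to 0xf (write-back, RW-allocate, non-transient).
 */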
2381 
2382 /*
2383  * Combine the memory type and cacheability attributes of
2384  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2385  * combined attributes in MAIR_EL1 format.
2386  */
2387 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2388 {
2389     switch (s2.attrs) {
2390     case 7:
2391         /* Use stage 1 attributes */
2392         return s1.attrs;
2393     case 6:
2394         /*
2395          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2396          * then we take the allocation hints from it; otherwise it is
2397          * RW allocate, non-transient.
2398          */
2399         if ((s1.attrs & 0xf0) == 0) {
2400             /* S1 is Device */
2401             return 0xff;
2402         }
2403         /* Need to check the Inner and Outer nibbles separately */
2404         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2405             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2406     case 5:
2407         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2408         if ((s1.attrs & 0xf0) == 0) {
2409             return s1.attrs;
2410         }
2411         return 0x44;
2412     case 0 ... 3:
2413         /* Force Device, of subtype specified by S2 */
2414         return s2.attrs << 2;
2415     default:
2416         /*
2417          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2418          * arbitrarily force Device.
2419          */
2420         return 0;
2421     }
2422 }
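
/*
 * Example (illustrative): with FWB, a stage 2 attrs value of 1 forces
 * Device nGnRE (MAIR 0x04) regardless of stage 1, while a value of 6
 * with a Device stage 1 mapping such as 0x04 forces Normal write-back
 * (0xff).
 */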
2423 
2424 /*
2425  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2426  * and CombineS1S2Desc()
2427  *
2428  * @env:     CPUARMState
2429  * @s1:      Attributes from stage 1 walk
2430  * @s2:      Attributes from stage 2 walk
2431  */
2432 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2433                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2434 {
2435     ARMCacheAttrs ret;
2436     bool tagged = false;
2437 
2438     assert(s2.is_s2_format && !s1.is_s2_format);
2439     ret.is_s2_format = false;
2440 
2441     if (s1.attrs == 0xf0) {
2442         tagged = true;
2443         s1.attrs = 0xff;
2444     }
2445 
2446     /* Combine shareability attributes (table D4-43) */
2447     if (s1.shareability == 2 || s2.shareability == 2) {
2448         /* if either are outer-shareable, the result is outer-shareable */
2449         ret.shareability = 2;
2450     } else if (s1.shareability == 3 || s2.shareability == 3) {
2451         /* if either are inner-shareable, the result is inner-shareable */
2452         ret.shareability = 3;
2453     } else {
2454         /* both non-shareable */
2455         ret.shareability = 0;
2456     }
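    /*
     * Example (illustrative): stage 1 Non-shareable (0) combined with
     * stage 2 Outer Shareable (2) yields Outer Shareable: the wider
     * shareability domain wins.
     */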
2457 
2458     /* Combine memory type and cacheability attributes */
2459     if (hcr & HCR_FWB) {
2460         ret.attrs = combined_attrs_fwb(s1, s2);
2461     } else {
2462         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2463     }
2464 
2465     /*
2466      * Any location for which the resultant memory type is any
2467      * type of Device memory is always treated as Outer Shareable.
2468      * Any location for which the resultant memory type is Normal
2469      * Inner Non-cacheable, Outer Non-cacheable is always treated
2470      * as Outer Shareable.
2471      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2472      */
2473     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2474         ret.shareability = 2;
2475     }
2476 
2477     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2478     if (tagged && ret.attrs == 0xff) {
2479         ret.attrs = 0xf0;
2480     }
2481 
2482     return ret;
2483 }
2484 
2485 /*
2486  * MMU disabled.  S1 addresses within aa64 translation regimes are
2487  * still checked for bounds -- see AArch64.S1DisabledOutput().
2488  */
2489 static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2490                                    MMUAccessType access_type,
2491                                    ARMMMUIdx mmu_idx, bool is_secure,
2492                                    GetPhysAddrResult *result,
2493                                    ARMMMUFaultInfo *fi)
2494 {
2495     uint8_t memattr = 0x00;    /* Device nGnRnE */
2496     uint8_t shareability = 0;  /* non-shareable */
2497     int r_el;
2498 
2499     switch (mmu_idx) {
2500     case ARMMMUIdx_Stage2:
2501     case ARMMMUIdx_Stage2_S:
2502     case ARMMMUIdx_Phys_NS:
2503     case ARMMMUIdx_Phys_S:
2504         break;
2505 
2506     default:
2507         r_el = regime_el(env, mmu_idx);
2508         if (arm_el_is_aa64(env, r_el)) {
2509             int pamax = arm_pamax(env_archcpu(env));
2510             uint64_t tcr = env->cp15.tcr_el[r_el];
2511             int addrtop, tbi;
2512 
2513             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2514             if (access_type == MMU_INST_FETCH) {
2515                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2516             }
2517             tbi = (tbi >> extract64(address, 55, 1)) & 1;
2518             addrtop = (tbi ? 55 : 63);
2519 
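            /*
             * Illustrative example: with pamax = 48 and TBI enabled for
             * this half of the address space, addrtop is 55, so only
             * bits [55:48] must be zero and a tag in bits [63:56] does
             * not fault; with TBI disabled, bits [63:48] must all be
             * zero.
             */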
2520             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2521                 fi->type = ARMFault_AddressSize;
2522                 fi->level = 0;
2523                 fi->stage2 = false;
2524                 return true;
2525             }
2526 
2527             /*
2528              * When TBI is disabled, we've just validated that all of the
2529              * bits above PAMax are zero, so logically we only need to
2530              * clear the top byte for TBI.  But it's clearer to follow
2531              * the pseudocode set of addrdesc.paddress.
2532              */
2533             address = extract64(address, 0, 52);
2534         }
2535 
2536         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2537         if (r_el == 1) {
2538             uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2539             if (hcr & HCR_DC) {
2540                 if (hcr & HCR_DCT) {
2541                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2542                 } else {
2543                     memattr = 0xff;  /* Normal, WB, RWA */
2544                 }
2545             }
2546         }
2547         if (memattr == 0 && access_type == MMU_INST_FETCH) {
2548             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2549                 memattr = 0xee;  /* Normal, WT, RA, NT */
2550             } else {
2551                 memattr = 0x44;  /* Normal, NC, no allocation hints */
2552             }
2553             shareability = 2; /* outer shareable */
2554         }
2555         result->cacheattrs.is_s2_format = false;
2556         break;
2557     }
2558 
2559     result->f.phys_addr = address;
2560     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2561     result->f.lg_page_size = TARGET_PAGE_BITS;
2562     result->cacheattrs.shareability = shareability;
2563     result->cacheattrs.attrs = memattr;
2564     return false;
2565 }
2566 
2567 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
2568                                    target_ulong address,
2569                                    MMUAccessType access_type,
2570                                    GetPhysAddrResult *result,
2571                                    ARMMMUFaultInfo *fi)
2572 {
2573     hwaddr ipa;
2574     int s1_prot, s1_lgpgsz;
2575     bool is_secure = ptw->in_secure;
2576     bool ret, ipa_secure, s2walk_secure;
2577     ARMCacheAttrs cacheattrs1;
2578     bool is_el0;
2579     uint64_t hcr;
2580 
2581     ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
2582 
2583     /* If S1 fails or S2 is disabled, return early.  */
2584     if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
2585         return ret;
2586     }
2587 
2588     ipa = result->f.phys_addr;
2589     ipa_secure = result->f.attrs.secure;
2590     if (is_secure) {
2591         /* Select TCR based on the NS bit from the S1 walk. */
2592         s2walk_secure = !(ipa_secure
2593                           ? env->cp15.vstcr_el2 & VSTCR_SW
2594                           : env->cp15.vtcr_el2 & VTCR_NSW);
2595     } else {
2596         assert(!ipa_secure);
2597         s2walk_secure = false;
2598     }
2599 
2600     is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
2601     ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2602     ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
2603     ptw->in_secure = s2walk_secure;
2604 
2605     /*
2606      * S1 is done, now do S2 translation.
2607      * Save the stage1 results so that we may merge prot and cacheattrs later.
2608      */
2609     s1_prot = result->f.prot;
2610     s1_lgpgsz = result->f.lg_page_size;
2611     cacheattrs1 = result->cacheattrs;
2612     memset(result, 0, sizeof(*result));
2613 
2614     ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
2615     fi->s2addr = ipa;
2616 
2617     /* Combine the S1 and S2 perms.  */
2618     result->f.prot &= s1_prot;
2619 
2620     /* If S2 fails, return early.  */
2621     if (ret) {
2622         return ret;
2623     }
2624 
2625     /*
2626      * Use the maximum of the S1 & S2 page size, so that invalidation
2627      * of pages > TARGET_PAGE_SIZE works correctly.
2628      */
2629     if (result->f.lg_page_size < s1_lgpgsz) {
2630         result->f.lg_page_size = s1_lgpgsz;
2631     }
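    /*
     * Example (illustrative): if stage 1 mapped a 2MB block (lg 21) but
     * stage 2 used 4KB pages (lg 12), we record lg_page_size = 21 so
     * that invalidating any page in the 2MB span also drops this entry.
     */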
2632 
2633     /* Combine the S1 and S2 cache attributes. */
2634     hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2635     if (hcr & HCR_DC) {
2636         /*
2637          * HCR.DC forces the first stage attributes to
2638          *  Normal Non-Shareable,
2639          *  Inner Write-Back Read-Allocate Write-Allocate,
2640          *  Outer Write-Back Read-Allocate Write-Allocate.
2641          * Do not overwrite Tagged within attrs.
2642          */
2643         if (cacheattrs1.attrs != 0xf0) {
2644             cacheattrs1.attrs = 0xff;
2645         }
2646         cacheattrs1.shareability = 0;
2647     }
2648     result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2649                                             result->cacheattrs);
2650 
2651     /*
2652      * Check if IPA translates to secure or non-secure PA space.
2653      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
2654      */
2655     result->f.attrs.secure =
2656         (is_secure
2657          && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
2658          && (ipa_secure
2659              || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
2660 
2661     return false;
2662 }
2663 
2664 static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
2665                                       target_ulong address,
2666                                       MMUAccessType access_type,
2667                                       GetPhysAddrResult *result,
2668                                       ARMMMUFaultInfo *fi)
2669 {
2670     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2671     bool is_secure = ptw->in_secure;
2672     ARMMMUIdx s1_mmu_idx;
2673 
2674     switch (mmu_idx) {
2675     case ARMMMUIdx_Phys_S:
2676     case ARMMMUIdx_Phys_NS:
2677         /* Checking Phys early avoids special casing later vs regime_el. */
2678         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2679                                       is_secure, result, fi);
2680 
2681     case ARMMMUIdx_Stage1_E0:
2682     case ARMMMUIdx_Stage1_E1:
2683     case ARMMMUIdx_Stage1_E1_PAN:
2684         /* First stage lookup uses second stage for ptw. */
2685         ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2686         break;
2687 
2688     case ARMMMUIdx_E10_0:
2689         s1_mmu_idx = ARMMMUIdx_Stage1_E0;
2690         goto do_twostage;
2691     case ARMMMUIdx_E10_1:
2692         s1_mmu_idx = ARMMMUIdx_Stage1_E1;
2693         goto do_twostage;
2694     case ARMMMUIdx_E10_1_PAN:
2695         s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
2696     do_twostage:
2697         /*
2698          * Call ourselves recursively to do the stage 1 and then stage 2
2699          * translations if mmu_idx is a two-stage regime and EL2 is present.
2700          * Otherwise, a stage1+stage2 translation is just stage 1.
2701          */
2702         ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
2703         if (arm_feature(env, ARM_FEATURE_EL2)) {
2704             return get_phys_addr_twostage(env, ptw, address, access_type,
2705                                           result, fi);
2706         }
2707         /* fall through */
2708 
2709     default:
2710         /* Single stage and second stage use physical for ptw. */
2711         ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
2712         break;
2713     }
2714 
2715     /*
2716      * The page table entries may downgrade secure to non-secure, but
2717      * cannot upgrade a non-secure translation regime's attributes
2718      * to secure.
2719      */
2720     result->f.attrs.secure = is_secure;
2721     result->f.attrs.user = regime_is_user(env, mmu_idx);
2722 
2723     /*
2724      * Fast Context Switch Extension. This doesn't exist at all in v8.
2725      * In v7 and earlier it affects all stage 1 translations.
2726      */
2727     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2728         && !arm_feature(env, ARM_FEATURE_V8)) {
2729         if (regime_el(env, mmu_idx) == 3) {
2730             address += env->cp15.fcseidr_s;
2731         } else {
2732             address += env->cp15.fcseidr_ns;
2733         }
2734     }
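    /*
     * Illustrative example: with a FCSE PID of 0x06000000, the VA
     * 0x00001000 becomes the modified VA 0x06001000 before the walk;
     * addresses at or above 32MB pass through unmodified.
     */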
2735 
2736     if (arm_feature(env, ARM_FEATURE_PMSA)) {
2737         bool ret;
2738         result->f.lg_page_size = TARGET_PAGE_BITS;
2739 
2740         if (arm_feature(env, ARM_FEATURE_V8)) {
2741             /* PMSAv8 */
2742             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2743                                        is_secure, result, fi);
2744         } else if (arm_feature(env, ARM_FEATURE_V7)) {
2745             /* PMSAv7 */
2746             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2747                                        is_secure, result, fi);
2748         } else {
2749             /* Pre-v7 MPU */
2750             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2751                                        is_secure, result, fi);
2752         }
2753         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2754                       " mmu_idx %u -> %s (prot %c%c%c)\n",
2755                       access_type == MMU_DATA_LOAD ? "reading" :
2756                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2757                       (uint32_t)address, mmu_idx,
2758                       ret ? "Miss" : "Hit",
2759                       result->f.prot & PAGE_READ ? 'r' : '-',
2760                       result->f.prot & PAGE_WRITE ? 'w' : '-',
2761                       result->f.prot & PAGE_EXEC ? 'x' : '-');
2762 
2763         return ret;
2764     }
2765 
2766     /* Definitely a real MMU, not an MPU */
2767 
2768     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
2769         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2770                                       is_secure, result, fi);
2771     }
2772 
2773     if (regime_using_lpae_format(env, mmu_idx)) {
2774         return get_phys_addr_lpae(env, ptw, address, access_type, false,
2775                                   result, fi);
2776     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2777         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
2778     } else {
2779         return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
2780     }
2781 }
2782 
2783 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
2784                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
2785                                bool is_secure, GetPhysAddrResult *result,
2786                                ARMMMUFaultInfo *fi)
2787 {
2788     S1Translate ptw = {
2789         .in_mmu_idx = mmu_idx,
2790         .in_secure = is_secure,
2791     };
2792     return get_phys_addr_with_struct(env, &ptw, address, access_type,
2793                                      result, fi);
2794 }
2795 
2796 bool get_phys_addr(CPUARMState *env, target_ulong address,
2797                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2798                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2799 {
2800     bool is_secure;
2801 
2802     switch (mmu_idx) {
2803     case ARMMMUIdx_E10_0:
2804     case ARMMMUIdx_E10_1:
2805     case ARMMMUIdx_E10_1_PAN:
2806     case ARMMMUIdx_E20_0:
2807     case ARMMMUIdx_E20_2:
2808     case ARMMMUIdx_E20_2_PAN:
2809     case ARMMMUIdx_Stage1_E0:
2810     case ARMMMUIdx_Stage1_E1:
2811     case ARMMMUIdx_Stage1_E1_PAN:
2812     case ARMMMUIdx_E2:
2813         is_secure = arm_is_secure_below_el3(env);
2814         break;
2815     case ARMMMUIdx_Stage2:
2816     case ARMMMUIdx_Phys_NS:
2817     case ARMMMUIdx_MPrivNegPri:
2818     case ARMMMUIdx_MUserNegPri:
2819     case ARMMMUIdx_MPriv:
2820     case ARMMMUIdx_MUser:
2821         is_secure = false;
2822         break;
2823     case ARMMMUIdx_E3:
2824     case ARMMMUIdx_Stage2_S:
2825     case ARMMMUIdx_Phys_S:
2826     case ARMMMUIdx_MSPrivNegPri:
2827     case ARMMMUIdx_MSUserNegPri:
2828     case ARMMMUIdx_MSPriv:
2829     case ARMMMUIdx_MSUser:
2830         is_secure = true;
2831         break;
2832     default:
2833         g_assert_not_reached();
2834     }
2835     return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
2836                                      is_secure, result, fi);
2837 }
2838 
2839 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2840                                          MemTxAttrs *attrs)
2841 {
2842     ARMCPU *cpu = ARM_CPU(cs);
2843     CPUARMState *env = &cpu->env;
2844     S1Translate ptw = {
2845         .in_mmu_idx = arm_mmu_idx(env),
2846         .in_secure = arm_is_secure(env),
2847         .in_debug = true,
2848     };
2849     GetPhysAddrResult res = {};
2850     ARMMMUFaultInfo fi = {};
2851     bool ret;
2852 
2853     ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
2854     *attrs = res.f.attrs;
2855 
2856     if (ret) {
2857         return -1;
2858     }
2859     return res.f.phys_addr;
2860 }
2861