xref: /qemu/target/arm/tcg/m_helper.c (revision 6e0dc9d2)
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}
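
/*
 * Note: in both xPSR helpers above, @reg is the low byte of the MRS/MSR
 * SYSm encoding (0 = APSR, 1 = IAPSR, 2 = EAPSR, 3 = xPSR, 5 = IPSR,
 * 6 = EPSR, 7 = IEPSR): bit 0 selects whether the IPSR field is included
 * and bit 2 whether the APSR fields are excluded, which is what the
 * (reg & 1) and (reg & 4) tests decode.
 */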

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
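
/*
 * For illustration, the composition above should yield e.g.:
 *   arm_v7m_mmu_idx_all(env, true, true, false)   == ARMMMUIdx_MSPriv
 *   arm_v7m_mmu_idx_all(env, false, false, false) == ARMMMUIdx_MUser
 * (the ARM_MMU_IDX_M_* bits and the ARMMMUIdx_M* enumerators are defined
 * together in internals.h, so the ORs land on the matching enum values).
 */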

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & R_V7M_CONTROL_NPRIV_MASK);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                      value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
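
/*
 * The offsets used in the lazy stacking above mirror the FP part of the
 * extended exception frame: s0-s15 at FPCAR+0x00..0x3c, FPSCR at +0x40,
 * VPR (if MVE is present) at +0x44, and s16-s31 (when FPCCR_S.TS is set)
 * from +0x48 upwards.
 */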

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
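
/*
 * As a worked example of the shuffle above: if we are Secure with MSP_S
 * live in regs[13] (so PSP_S is in other_sp) and switch to Non-secure,
 * MSP_S and PSP_S are stashed in other_ss_msp/other_ss_psp, and
 * regs[13]/other_sp are reloaded with the Non-secure MSP/PSP that were
 * saved there when we last left Non-secure state.
 */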

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}
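
/*
 * The two words pushed above form the secure function-call frame: the
 * return address (with bit 0 set) at sp, and the partial PSR including
 * the stashed SFPA bit at sp + 4. The 0xfeffffff written to LR is the
 * FNC_RETURN magic value which, on a later BX, triggers the matching
 * unstacking described by the v8M FunctionReturn() pseudocode.
 */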

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
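
/*
 * So the signature is 0xfefa125a when the exception frame includes FP
 * state (FType == 0) and 0xfefa125b otherwise, matching the integrity
 * signature values given in the v8M ARM ARM.
 */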

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
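    /*
     * Layout of the 0x28-byte callee-saves frame we push below,
     * as offsets from the final frameptr:
     *   0x00 integrity signature   0x04 reserved
     *   0x08 r4    0x0c r5    0x10 r6    0x14 r7
     *   0x18 r8    0x1c r9    0x20 r10   0x24 r11
     */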
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception).
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }
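
    /*
     * For reference: 0x20 is the basic frame (r0-r3, r12, lr, pc, xPSR);
     * 0x68 additionally covers s0-s15, FPSCR and the reserved/VPR word;
     * 0xa8 is the v8M frame which also covers s16-s31.
     */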

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = true;
1427             /* For all other purposes, treat ES as 0 (R_HXSR) */
1428             excret &= ~R_V7M_EXCRET_ES_MASK;
1429         }
1430         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1431     }
1432 
1433     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1434         /*
1435          * Auto-clear FAULTMASK on return from other than NMI.
1436          * If the security extension is implemented then this only
1437          * happens if the raw execution priority is >= 0; the
1438          * value of the ES bit in the exception return value indicates
1439          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1440          */
1441         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1442             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1443                 env->v7m.faultmask[exc_secure] = 0;
1444             }
1445         } else {
1446             env->v7m.faultmask[M_REG_NS] = 0;
1447         }
1448     }
1449 
1450     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1451                                      exc_secure)) {
1452     case -1:
1453         /* attempt to exit an exception that isn't active */
1454         ufault = true;
1455         break;
1456     case 0:
1457         /* there is still an IRQ active now */
1458         break;
1459     case 1:
1460         /*
1461          * We returned to base exception level, no nesting.
1462          * (In the pseudocode this is written using "NestedActivation != 1"
1463          * where we have 'rettobase == false'.)
1464          */
1465         rettobase = true;
1466         break;
1467     default:
1468         g_assert_not_reached();
1469     }
1470 
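    /*
     * Decode the other EXC_RETURN fields we need: MODE (bit 3) selects
     * return to Handler vs Thread mode, SPSEL (bit 2) selects return to
     * MSP vs PSP, and for v8M the S bit (bit 6) gives the security state
     * whose stack holds the frame we are returning to.
     */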
1471     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1472     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1473     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1474         (excret & R_V7M_EXCRET_S_MASK);
1475 
1476     if (arm_feature(env, ARM_FEATURE_V8)) {
1477         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1478             /*
1479              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1480              * we choose to take the UsageFault.
1481              */
1482             if ((excret & R_V7M_EXCRET_S_MASK) ||
1483                 (excret & R_V7M_EXCRET_ES_MASK) ||
1484                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1485                 ufault = true;
1486             }
1487         }
1488         if (excret & R_V7M_EXCRET_RES0_MASK) {
1489             ufault = true;
1490         }
1491     } else {
1492         /* For v7M we only recognize certain combinations of the low bits */
1493         switch (excret & 0xf) {
1494         case 1: /* Return to Handler */
1495             break;
1496         case 13: /* Return to Thread using Process stack */
1497         case 9: /* Return to Thread using Main stack */
1498             /*
1499              * We only need to check NONBASETHRDENA for v7M, because in
1500              * v8M this bit does not exist (it is RES1).
1501              */
1502             if (!rettobase &&
1503                 !(env->v7m.ccr[env->v7m.secure] &
1504                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1505                 ufault = true;
1506             }
1507             break;
1508         default:
1509             ufault = true;
1510         }
1511     }
1512 
1513     /*
1514      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1515      * Handler mode (and will be until we write the new XPSR.Interrupt
1516      * field) this does not switch around the current stack pointer.
1517      * We must do this before we do any kind of tailchaining, including
1518      * for the derived exceptions on integrity check failures, or we will
1519      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1520      */
1521     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1522 
1523     /*
1524      * Clear scratch FP values left in caller saved registers; this
1525      * must happen before any kind of tail chaining.
1526      */
1527     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1528         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1529         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1530             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1531             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1532             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1533                           "stackframe: error during lazy state deactivation\n");
1534             v7m_exception_taken(cpu, excret, true, false);
1535             return;
1536         } else {
1537             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1538                 /* v8.1M adds this NOCP check */
1539                 bool nsacr_pass = exc_secure ||
1540                     extract32(env->v7m.nsacr, 10, 1);
1541                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1542                 if (!nsacr_pass) {
1543                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1544                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1545                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1546                         "stackframe: NSACR prevents clearing FPU registers\n");
1547                     v7m_exception_taken(cpu, excret, true, false);
1548                     return;
1549                 } else if (!cpacr_pass) {
1550                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1551                                             exc_secure);
1552                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1553                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1554                         "stackframe: CPACR prevents clearing FPU registers\n");
1555                     v7m_exception_taken(cpu, excret, true, false);
1556                     return;
1557                 }
1558             }
1559             /* Clear s0..s15, FPSCR and VPR */
1560             int i;
1561 
1562             for (i = 0; i < 16; i += 2) {
1563                 *aa32_vfp_dreg(env, i / 2) = 0;
1564             }
1565             vfp_set_fpscr(env, 0);
1566             if (cpu_isar_feature(aa32_mve, cpu)) {
1567                 env->v7m.vpr = 0;
1568             }
1569         }
1570     }
1571 
1572     if (sfault) {
1573         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1574         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1575         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1576                       "stackframe: failed EXC_RETURN.ES validity check\n");
1577         v7m_exception_taken(cpu, excret, true, false);
1578         return;
1579     }
1580 
1581     if (ufault) {
1582         /*
1583          * Bad exception return: instead of popping the exception
1584          * stack, directly take a usage fault on the current stack.
1585          */
1586         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1587         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1588         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1589                       "stackframe: failed exception return integrity check\n");
1590         v7m_exception_taken(cpu, excret, true, false);
1591         return;
1592     }
1593 
1594     /*
1595      * Tailchaining: if there is currently a pending exception that
1596      * is high enough priority to preempt execution at the level we're
1597      * about to return to, then just directly take that exception now,
1598      * avoiding an unstack-and-then-stack. Note that now we have
1599      * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1600      * our current execution priority is already the execution priority we are
1601      * returning to -- none of the state we would unstack or set based on
1602      * the EXCRET value affects it.
1603      */
1604     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1605         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1606         v7m_exception_taken(cpu, excret, true, false);
1607         return;
1608     }
1609 
1610     switch_v7m_security_state(env, return_to_secure);
1611 
1612     {
1613         /*
1614          * The stack pointer we should be reading the exception frame from
1615          * depends on bits in the magic exception return type value (and
1616          * for v8M isn't necessarily the stack pointer we will eventually
1617          * end up resuming execution with). Get a pointer to the location
1618          * in the CPU state struct where the SP we need is currently being
1619          * stored; we will use and modify it in place.
1620          * We use this limited C variable scope so we don't accidentally
1621          * use 'frame_sp_p' after we do something that makes it invalid.
1622          */
1623         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1624         uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
1625                                                   !return_to_handler, spsel);
1626         uint32_t frameptr = *frame_sp_p;
1627         bool pop_ok = true;
1628         ARMMMUIdx mmu_idx;
1629         bool return_to_priv = return_to_handler ||
1630             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1631 
1632         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1633                                                         return_to_priv);
1634 
1635         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1636             arm_feature(env, ARM_FEATURE_V8)) {
1637             qemu_log_mask(LOG_GUEST_ERROR,
1638                           "M profile exception return with non-8-aligned SP "
1639                           "for destination state is UNPREDICTABLE\n");
1640         }
1641 
1642         /* Do we need to pop callee-saved registers? */
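        /*
         * The v8M callee-save frame is 0x28 bytes: the integrity signature
         * at +0x0, a reserved word at +0x4, then r4-r11 at +0x8..+0x24.
         */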
1643         if (return_to_secure &&
1644             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1645              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1646             uint32_t actual_sig;
1647 
1648             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1649 
1650             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1651                 /* Take a SecureFault on the current stack */
1652                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1653                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1654                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1655                               "stackframe: failed exception return integrity "
1656                               "signature check\n");
1657                 v7m_exception_taken(cpu, excret, true, false);
1658                 return;
1659             }
1660 
1661             pop_ok = pop_ok &&
1662                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1663                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1664                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1665                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1666                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1667                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1668                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1669                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1670 
1671             frameptr += 0x28;
1672         }
1673 
1674         /* Pop the basic 8-word frame: r0-r3, r12, lr, pc and xPSR */
1675         pop_ok = pop_ok &&
1676             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1677             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1678             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1679             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1680             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1681             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1682             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1683             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1684 
1685         if (!pop_ok) {
1686             /*
1687              * v7m_stack_read() pended a fault, so take it (as a tail
1688              * chained exception on the same stack frame)
1689              */
1690             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1691             v7m_exception_taken(cpu, excret, true, false);
1692             return;
1693         }
1694 
1695         /*
1696          * Returning from an exception with a PC with bit 0 set is defined
1697          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1698          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1699          * the lsbit, and there are several RTOSes out there which incorrectly
1700          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1701          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1702          * complain about the badly behaved guest.
1703          */
1704         if (env->regs[15] & 1) {
1705             env->regs[15] &= ~1U;
1706             if (!arm_feature(env, ARM_FEATURE_V8)) {
1707                 qemu_log_mask(LOG_GUEST_ERROR,
1708                               "M profile return from interrupt with misaligned "
1709                               "PC is UNPREDICTABLE on v7M\n");
1710             }
1711         }
1712 
1713         if (arm_feature(env, ARM_FEATURE_V8)) {
1714             /*
1715              * For v8M we have to check whether the xPSR exception field
1716              * matches the EXCRET value for return to handler/thread
1717              * before we commit to changing the SP and xPSR.
1718              */
1719             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1720             if (return_to_handler != will_be_handler) {
1721                 /*
1722                  * Take an INVPC UsageFault on the current stack.
1723                  * By this point we will have switched to the security state
1724                  * for the background state, so this UsageFault will target
1725                  * that state.
1726                  */
1727                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1728                                         env->v7m.secure);
1729                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1730                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1731                               "stackframe: failed exception return integrity "
1732                               "check\n");
1733                 v7m_exception_taken(cpu, excret, true, false);
1734                 return;
1735             }
1736         }
1737 
1738         if (!ftype) {
1739             /* FP present and we need to handle it */
1740             if (!return_to_secure &&
1741                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1742                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1743                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1744                 qemu_log_mask(CPU_LOG_INT,
1745                               "...taking SecureFault on existing stackframe: "
1746                               "Secure LSPACT set but exception return is "
1747                               "not to secure state\n");
1748                 v7m_exception_taken(cpu, excret, true, false);
1749                 return;
1750             }
1751 
1752             restore_s16_s31 = return_to_secure &&
1753                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1754 
1755             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1756                 /* State in FPU is still valid, just clear LSPACT */
1757                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1758             } else {
1759                 int i;
1760                 uint32_t fpscr;
1761                 bool cpacr_pass, nsacr_pass;
1762 
1763                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1764                                             return_to_priv);
1765                 nsacr_pass = return_to_secure ||
1766                     extract32(env->v7m.nsacr, 10, 1);
1767 
1768                 if (!cpacr_pass) {
1769                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1770                                             return_to_secure);
1771                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1772                     qemu_log_mask(CPU_LOG_INT,
1773                                   "...taking UsageFault on existing "
1774                                   "stackframe: CPACR.CP10 prevents unstacking "
1775                                   "FP regs\n");
1776                     v7m_exception_taken(cpu, excret, true, false);
1777                     return;
1778                 } else if (!nsacr_pass) {
1779                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1780                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1781                     qemu_log_mask(CPU_LOG_INT,
1782                                   "...taking Secure UsageFault on existing "
1783                                   "stackframe: NSACR.CP10 prevents unstacking "
1784                                   "FP regs\n");
1785                     v7m_exception_taken(cpu, excret, true, false);
1786                     return;
1787                 }
1788 
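                /*
                 * Unstack the FP area of the extended frame: s0-s15 at
                 * +0x20..+0x5c, FPSCR at +0x60, VPR at +0x64, and (when
                 * FPCCR_S.TS is set) s16-s31 from +0x68 onwards.
                 */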
1789                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1790                     uint32_t slo, shi;
1791                     uint64_t dn;
1792                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1793 
1794                     if (i >= 16) {
1795                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1796                     }
1797 
1798                     pop_ok = pop_ok &&
1799                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1800                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1801 
1802                     if (!pop_ok) {
1803                         break;
1804                     }
1805 
1806                     dn = (uint64_t)shi << 32 | slo;
1807                     *aa32_vfp_dreg(env, i / 2) = dn;
1808                 }
1809                 pop_ok = pop_ok &&
1810                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1811                 if (pop_ok) {
1812                     vfp_set_fpscr(env, fpscr);
1813                 }
1814                 if (cpu_isar_feature(aa32_mve, cpu)) {
1815                     pop_ok = pop_ok &&
1816                         v7m_stack_read(cpu, &env->v7m.vpr,
1817                                        frameptr + 0x64, mmu_idx);
1818                 }
1819                 if (!pop_ok) {
1820                     /*
1821                      * These regs are 0 if the security extension is present;
1822                      * otherwise merely UNKNOWN. We always zero them.
1823                      */
1824                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1825                         *aa32_vfp_dreg(env, i / 2) = 0;
1826                     }
1827                     vfp_set_fpscr(env, 0);
1828                     if (cpu_isar_feature(aa32_mve, cpu)) {
1829                         env->v7m.vpr = 0;
1830                     }
1831                 }
1832             }
1833         }
1834         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1835                                                V7M_CONTROL, FPCA, !ftype);
1836 
1837         /* Commit to consuming the stack frame */
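        /*
         * Frame sizes: the basic frame is 0x20 bytes (8 words); the
         * standard FP area (s0-s15, FPSCR, VPR) adds 0x48, and s16-s31
         * add another 0x40 when present.
         */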
1838         frameptr += 0x20;
1839         if (!ftype) {
1840             frameptr += 0x48;
1841             if (restore_s16_s31) {
1842                 frameptr += 0x40;
1843             }
1844         }
1845         /*
1846          * Undo stack alignment: the SPREALIGN bit indicates that the original
1847          * pre-exception SP was not 8-aligned and we added a padding word to
1848          * align it, so we undo this by ORing in the bit that increases it
1849          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1850          * would work too but a logical OR is how the pseudocode specifies it.)
1851          */
1852         if (xpsr & XPSR_SPREALIGN) {
1853             frameptr |= 4;
1854         }
1855         *frame_sp_p = frameptr;
1856     }
1857 
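    /*
     * SPREALIGN and SFPA are not architectural xPSR bits: they were
     * stashed in the stacked xPSR on exception entry, so mask them out
     * rather than writing them back to the live xPSR.
     */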
1858     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1859     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1860         xpsr_mask &= ~XPSR_GE;
1861     }
1862     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1863     xpsr_write(env, xpsr, xpsr_mask);
1864 
1865     if (env->v7m.secure) {
1866         bool sfpa = xpsr & XPSR_SFPA;
1867 
1868         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1869                                                V7M_CONTROL, SFPA, sfpa);
1870     }
1871 
1872     /*
1873      * The restored xPSR exception field will be zero if we're
1874      * resuming in Thread mode. If that doesn't match what the
1875      * exception return excret specified then this is a UsageFault.
1876      * v7M requires we make this check here; v8M did it earlier.
1877      */
1878     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1879         /*
1880          * Take an INVPC UsageFault by pushing the stack again;
1881          * we know we're v7M so this is never a Secure UsageFault.
1882          */
1883         bool ignore_stackfaults;
1884 
1885         assert(!arm_feature(env, ARM_FEATURE_V8));
1886         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1887         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1888         ignore_stackfaults = v7m_push_stack(cpu);
1889         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1890                       "failed exception return integrity check\n");
1891         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1892         return;
1893     }
1894 
1895     /* Otherwise, we have a successful exception exit. */
1896     arm_clear_exclusive(env);
1897     arm_rebuild_hflags(env);
1898     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1899 }
1900 
1901 static bool do_v7m_function_return(ARMCPU *cpu)
1902 {
1903     /*
1904      * v8M security extensions magic function return.
1905      * We may either:
1906      *  (1) throw an exception (longjump)
1907      *  (2) return true if we successfully handled the function return
1908      *  (3) return false if we failed a consistency check and have
1909      *      pended a UsageFault that needs to be taken now
1910      *
1911      * At this point the magic return value is split between env->regs[15]
1912      * and env->thumb. We don't bother to reconstitute it because we don't
1913      * need it (all values are handled the same way).
1914      */
1915     CPUARMState *env = &cpu->env;
1916     uint32_t newpc, newpsr, newpsr_exc;
1917 
1918     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1919 
1920     {
1921         bool threadmode, spsel;
1922         MemOpIdx oi;
1923         ARMMMUIdx mmu_idx;
1924         uint32_t *frame_sp_p;
1925         uint32_t frameptr;
1926 
1927         /* Pull the return address and IPSR from the Secure stack */
1928         threadmode = !arm_v7m_is_handler_mode(env);
1929         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1930 
1931         frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
1932         frameptr = *frame_sp_p;
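        /*
         * BLXNS pushed a two-word frame on the Secure stack: the true
         * return address and a partial xPSR (which carries SFPA).
         */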
1933 
1934         /*
1935          * These loads may throw an exception (for MPU faults). We want to
1936          * do them as secure, so work out what MMU index that is.
1937          */
1938         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1939         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1940         newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
1941         newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
1942 
1943         /* Consistency checks on new IPSR */
1944         newpsr_exc = newpsr & XPSR_EXCP;
1945         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1946               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1947             /* Pend the fault and tell our caller to take it */
1948             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1949             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1950                                     env->v7m.secure);
1951             qemu_log_mask(CPU_LOG_INT,
1952                           "...taking INVPC UsageFault: "
1953                           "IPSR consistency check failed\n");
1954             return false;
1955         }
1956 
1957         *frame_sp_p = frameptr + 8;
1958     }
1959 
1960     /* This invalidates frame_sp_p */
1961     switch_v7m_security_state(env, true);
1962     env->v7m.exception = newpsr_exc;
1963     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1964     if (newpsr & XPSR_SFPA) {
1965         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1966     }
1967     xpsr_write(env, 0, XPSR_IT);
1968     env->thumb = newpc & 1;
1969     env->regs[15] = newpc & ~1;
1970     arm_rebuild_hflags(env);
1971 
1972     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1973     return true;
1974 }
1975 
1976 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
1977                                uint32_t addr, uint16_t *insn)
1978 {
1979     /*
1980      * Load a 16-bit portion of a v7M instruction, returning true on success,
1981      * or false on failure (in which case we will have pended the appropriate
1982      * exception).
1983      * We need to do the instruction fetch's MPU and SAU checks
1984      * like this because there is no MMU index that would allow
1985      * doing the load with a single function call. Instead we must
1986      * first check that the security attributes permit the load
1987      * and that they don't mismatch on the two halves of the instruction,
1988      * and then we do the load as a secure load (ie using the security
1989      * attributes of the address, not the CPU, as architecturally required).
1990      */
1991     CPUState *cs = CPU(cpu);
1992     CPUARMState *env = &cpu->env;
1993     V8M_SAttributes sattrs = {};
1994     GetPhysAddrResult res = {};
1995     ARMMMUFaultInfo fi = {};
1996     MemTxResult txres;
1997 
1998     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
1999     if (!sattrs.nsc || sattrs.ns) {
2000         /*
2001          * This must be the second half of the insn, and it straddles a
2002          * region boundary with the second half not being S&NSC.
2003          */
2004         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2005         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2006         qemu_log_mask(CPU_LOG_INT,
2007                       "...really SecureFault with SFSR.INVEP\n");
2008         return false;
2009     }
2010     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
2011         /* the MPU lookup failed */
2012         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2013         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2014         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2015         return false;
2016     }
2017     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2018                                   res.f.phys_addr, res.f.attrs, &txres);
2019     if (txres != MEMTX_OK) {
2020         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2021         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2022         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2023         return false;
2024     }
2025     return true;
2026 }
2027 
2028 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2029                                    uint32_t addr, uint32_t *spdata)
2030 {
2031     /*
2032      * Read a word of data from the stack for the SG instruction,
2033      * writing the value into *spdata. If the load succeeds, return
2034      * true; otherwise pend an appropriate exception and return false.
2035      * (We can't use data load helpers here that throw an exception
2036      * because of the context we're called in, which is halfway through
2037      * arm_v7m_cpu_do_interrupt().)
2038      */
2039     CPUState *cs = CPU(cpu);
2040     CPUARMState *env = &cpu->env;
2041     MemTxResult txres;
2042     GetPhysAddrResult res = {};
2043     ARMMMUFaultInfo fi = {};
2044     uint32_t value;
2045 
2046     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
2047         /* MPU/SAU lookup failed */
2048         if (fi.type == ARMFault_QEMU_SFault) {
2049             qemu_log_mask(CPU_LOG_INT,
2050                           "...SecureFault during stack word read\n");
2051             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2052             env->v7m.sfar = addr;
2053             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2054         } else {
2055             qemu_log_mask(CPU_LOG_INT,
2056                           "...MemManageFault during stack word read\n");
2057             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2058                 R_V7M_CFSR_MMARVALID_MASK;
2059             env->v7m.mmfar[M_REG_S] = addr;
2060             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2061         }
2062         return false;
2063     }
2064     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2065                               res.f.phys_addr, res.f.attrs, &txres);
2066     if (txres != MEMTX_OK) {
2067         /* BusFault trying to read the data */
2068         qemu_log_mask(CPU_LOG_INT,
2069                       "...BusFault during stack word read\n");
2070         env->v7m.cfsr[M_REG_NS] |=
2071             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2072         env->v7m.bfar = addr;
2073         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2074         return false;
2075     }
2076 
2077     *spdata = value;
2078     return true;
2079 }
2080 
2081 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2082 {
2083     /*
2084      * Check whether this attempt to execute code in a Secure & NS-Callable
2085      * memory region is for an SG instruction; if so, then emulate the
2086      * effect of the SG instruction and return true. Otherwise pend
2087      * the correct kind of exception and return false.
2088      */
2089     CPUARMState *env = &cpu->env;
2090     ARMMMUIdx mmu_idx;
2091     uint16_t insn;
2092 
2093     /*
2094      * We should never get here unless get_phys_addr_pmsav8() caused
2095      * an exception for NS executing in S&NSC memory.
2096      */
2097     assert(!env->v7m.secure);
2098     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2099 
2100     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2101     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2102 
2103     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2104         return false;
2105     }
2106 
2107     if (!env->thumb) {
2108         goto gen_invep;
2109     }
2110 
2111     if (insn != 0xe97f) {
2112         /*
2113          * Not an SG instruction first half (we choose the IMPDEF
2114          * early-SG-check option).
2115          */
2116         goto gen_invep;
2117     }
2118 
2119     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2120         return false;
2121     }
2122 
2123     if (insn != 0xe97f) {
2124         /*
2125          * Not an SG instruction second half (yes, both halves of the SG
2126          * insn have the same hex value)
2127          */
2128         goto gen_invep;
2129     }
2130 
2131     /*
2132      * OK, we have confirmed that we really have an SG instruction.
2133      * We know we're NS in S memory so don't need to repeat those checks.
2134      */
2135     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2136                   ", executing it\n", env->regs[15]);
2137 
2138     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2139         !arm_v7m_is_handler_mode(env)) {
2140         /*
2141          * v8.1M exception stack frame integrity check. Note that we
2142          * must perform the memory access even if CCR_S.TRD is zero
2143          * and we aren't going to check what the data loaded is.
2144          */
2145         uint32_t spdata, sp;
2146 
2147         /*
2148          * We know we are currently NS, so the S stack pointers must be
2149          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2150          */
2151         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2152         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2153             /* Stack access failed and an exception has been pended */
2154             return false;
2155         }
2156 
2157         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2158             if (((spdata & ~1) == 0xfefa125a) ||
2159                 !(env->v7m.control[M_REG_S] & 1)) {
2160                 goto gen_invep;
2161             }
2162         }
2163     }
2164 
2165     env->regs[14] &= ~1;
2166     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2167     switch_v7m_security_state(env, true);
2168     xpsr_write(env, 0, XPSR_IT);
2169     env->regs[15] += 4;
2170     arm_rebuild_hflags(env);
2171     return true;
2172 
2173 gen_invep:
2174     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2175     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2176     qemu_log_mask(CPU_LOG_INT,
2177                   "...really SecureFault with SFSR.INVEP\n");
2178     return false;
2179 }
2180 
2181 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2182 {
2183     ARMCPU *cpu = ARM_CPU(cs);
2184     CPUARMState *env = &cpu->env;
2185     uint32_t lr;
2186     bool ignore_stackfaults;
2187 
2188     arm_log_exception(cs);
2189 
2190     /*
2191      * For exceptions we just mark as pending on the NVIC, and let that
2192      * handle it.
2193      */
2194     switch (cs->exception_index) {
2195     case EXCP_UDEF:
2196         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2197         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2198         break;
2199     case EXCP_NOCP:
2200     {
2201         /*
2202          * NOCP might be directed to something other than the current
2203          * security state if this fault is because of NSACR; we indicate
2204          * the target security state using exception.target_el.
2205          */
2206         int target_secstate;
2207 
2208         if (env->exception.target_el == 3) {
2209             target_secstate = M_REG_S;
2210         } else {
2211             target_secstate = env->v7m.secure;
2212         }
2213         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2214         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2215         break;
2216     }
2217     case EXCP_INVSTATE:
2218         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2219         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2220         break;
2221     case EXCP_STKOF:
2222         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2223         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2224         break;
2225     case EXCP_LSERR:
2226         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2227         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2228         break;
2229     case EXCP_UNALIGNED:
2230         /* Unaligned faults reported by M-profile aware code */
2231         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2232         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2233         break;
2234     case EXCP_DIVBYZERO:
2235         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2236         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2237         break;
2238     case EXCP_SWI:
2239         /* The PC already points to the next instruction.  */
2240         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2241         break;
2242     case EXCP_PREFETCH_ABORT:
2243     case EXCP_DATA_ABORT:
2244         /*
2245          * Note that for M profile we don't have a guest facing FSR, but
2246          * the env->exception.fsr will be populated by the code that
2247          * raises the fault, in the A profile short-descriptor format.
2248          *
2249          * Log the exception.vaddress now regardless of subtype, because
2250          * logging below only logs it when it goes into a guest visible
2251          * register.
2252          */
2253         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2254                       (uint32_t)env->exception.vaddress);
2255         switch (env->exception.fsr & 0xf) {
2256         case M_FAKE_FSR_NSC_EXEC:
2257             /*
2258              * Exception generated when we try to execute code at an address
2259              * which is marked as Secure & Non-Secure Callable and the CPU
2260              * is in the Non-Secure state. The only instruction which can
2261              * be executed like this is SG (and that only if both halves of
2262              * the SG instruction have the same security attributes.)
2263              * Everything else must generate an INVEP SecureFault, so we
2264              * emulate the SG instruction here.
2265              */
2266             if (v7m_handle_execute_nsc(cpu)) {
2267                 return;
2268             }
2269             break;
2270         case M_FAKE_FSR_SFAULT:
2271             /*
2272              * Various flavours of SecureFault for attempts to execute or
2273              * access data in the wrong security state.
2274              */
2275             switch (cs->exception_index) {
2276             case EXCP_PREFETCH_ABORT:
2277                 if (env->v7m.secure) {
2278                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2279                     qemu_log_mask(CPU_LOG_INT,
2280                                   "...really SecureFault with SFSR.INVTRAN\n");
2281                 } else {
2282                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2283                     qemu_log_mask(CPU_LOG_INT,
2284                                   "...really SecureFault with SFSR.INVEP\n");
2285                 }
2286                 break;
2287             case EXCP_DATA_ABORT:
2288                 /* This must be an NS access to S memory */
2289                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2290                 qemu_log_mask(CPU_LOG_INT,
2291                               "...really SecureFault with SFSR.AUVIOL\n");
2292                 break;
2293             }
2294             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2295             break;
2296         case 0x8: /* External Abort */
2297             switch (cs->exception_index) {
2298             case EXCP_PREFETCH_ABORT:
2299                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2300                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2301                 break;
2302             case EXCP_DATA_ABORT:
2303                 env->v7m.cfsr[M_REG_NS] |=
2304                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2305                 env->v7m.bfar = env->exception.vaddress;
2306                 qemu_log_mask(CPU_LOG_INT,
2307                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2308                               env->v7m.bfar);
2309                 break;
2310             }
2311             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2312             break;
2313         case 0x1: /* Alignment fault reported by generic code */
2314             qemu_log_mask(CPU_LOG_INT,
2315                           "...really UsageFault with UFSR.UNALIGNED\n");
2316             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2317             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2318                                     env->v7m.secure);
2319             break;
2320         default:
2321             /*
2322              * All other FSR values are either MPU faults or "can't happen
2323              * for M profile" cases.
2324              */
2325             switch (cs->exception_index) {
2326             case EXCP_PREFETCH_ABORT:
2327                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2328                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2329                 break;
2330             case EXCP_DATA_ABORT:
2331                 env->v7m.cfsr[env->v7m.secure] |=
2332                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2333                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2334                 qemu_log_mask(CPU_LOG_INT,
2335                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2336                               env->v7m.mmfar[env->v7m.secure]);
2337                 break;
2338             }
2339             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2340                                     env->v7m.secure);
2341             break;
2342         }
2343         break;
2344     case EXCP_SEMIHOST:
2345         qemu_log_mask(CPU_LOG_INT,
2346                       "...handling as semihosting call 0x%x\n",
2347                       env->regs[0]);
2348 #ifdef CONFIG_TCG
2349         do_common_semihosting(cs);
2350 #else
2351         g_assert_not_reached();
2352 #endif
2353         env->regs[15] += env->thumb ? 2 : 4;
2354         return;
2355     case EXCP_BKPT:
2356         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2357         break;
2358     case EXCP_IRQ:
2359         break;
2360     case EXCP_EXCEPTION_EXIT:
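        /*
         * FNC_RETURN magic values sort below EXC_RETURN_MIN_MAGIC, so a
         * magic PC below that bound must be a secure function return.
         */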
2361         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2362             /* Must be v8M security extension function return */
2363             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2364             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2365             if (do_v7m_function_return(cpu)) {
2366                 return;
2367             }
2368         } else {
2369             do_v7m_exception_exit(cpu);
2370             return;
2371         }
2372         break;
2373     case EXCP_LAZYFP:
2374         /*
2375          * We already pended the specific exception in the NVIC in the
2376          * v7m_preserve_fp_state() helper function.
2377          */
2378         break;
2379     default:
2380         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2381         return; /* Never happens.  Keep compiler happy.  */
2382     }
2383 
2384     if (arm_feature(env, ARM_FEATURE_V8)) {
2385         lr = R_V7M_EXCRET_RES1_MASK |
2386             R_V7M_EXCRET_DCRS_MASK;
2387         /*
2388          * The S bit indicates whether we should return to Secure
2389          * or NonSecure (ie our current state).
2390          * The ES bit indicates whether we're taking this exception
2391          * to Secure or NonSecure (ie our target state). We set it
2392          * later, in v7m_exception_taken().
2393          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2394          * This corresponds to the ARM ARM pseudocode for v8M setting
2395          * some LR bits in PushStack() and some in ExceptionTaken();
2396          * the distinction matters for the tailchain cases where we
2397          * can take an exception without pushing the stack.
2398          */
2399         if (env->v7m.secure) {
2400             lr |= R_V7M_EXCRET_S_MASK;
2401         }
2402     } else {
2403         lr = R_V7M_EXCRET_RES1_MASK |
2404             R_V7M_EXCRET_S_MASK |
2405             R_V7M_EXCRET_DCRS_MASK |
2406             R_V7M_EXCRET_ES_MASK;
2407         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2408             lr |= R_V7M_EXCRET_SPSEL_MASK;
2409         }
2410     }
2411     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2412         lr |= R_V7M_EXCRET_FTYPE_MASK;
2413     }
2414     if (!arm_v7m_is_handler_mode(env)) {
2415         lr |= R_V7M_EXCRET_MODE_MASK;
2416     }
2417 
2418     ignore_stackfaults = v7m_push_stack(cpu);
2419     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2420 }
2421 
2422 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2423 {
2424     unsigned el = arm_current_el(env);
2425 
2426     /* First handle registers which unprivileged code can read */
2427     switch (reg) {
2428     case 0 ... 7: /* xPSR sub-fields */
2429         return v7m_mrs_xpsr(env, reg, el);
2430     case 20: /* CONTROL */
2431         return arm_v7m_mrs_control(env, env->v7m.secure);
2432     case 0x94: /* CONTROL_NS */
2433         /*
2434          * We have to handle this here because unprivileged Secure code
2435          * can read the NS CONTROL register.
2436          */
2437         if (!env->v7m.secure) {
2438             return 0;
2439         }
2440         return env->v7m.control[M_REG_NS] |
2441             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2442     }
2443 
2444     if (el == 0) {
2445         return 0; /* unprivileged reads others as zero */
2446     }
2447 
2448     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
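        /*
         * SYSm values 0x88-0x98 are the _NS register aliases; they are
         * visible only to Secure code (Non-secure reads them as zero).
         */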
2449         switch (reg) {
2450         case 0x88: /* MSP_NS */
2451             if (!env->v7m.secure) {
2452                 return 0;
2453             }
2454             return env->v7m.other_ss_msp;
2455         case 0x89: /* PSP_NS */
2456             if (!env->v7m.secure) {
2457                 return 0;
2458             }
2459             return env->v7m.other_ss_psp;
2460         case 0x8a: /* MSPLIM_NS */
2461             if (!env->v7m.secure) {
2462                 return 0;
2463             }
2464             return env->v7m.msplim[M_REG_NS];
2465         case 0x8b: /* PSPLIM_NS */
2466             if (!env->v7m.secure) {
2467                 return 0;
2468             }
2469             return env->v7m.psplim[M_REG_NS];
2470         case 0x90: /* PRIMASK_NS */
2471             if (!env->v7m.secure) {
2472                 return 0;
2473             }
2474             return env->v7m.primask[M_REG_NS];
2475         case 0x91: /* BASEPRI_NS */
2476             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2477                 goto bad_reg;
2478             }
2479             if (!env->v7m.secure) {
2480                 return 0;
2481             }
2482             return env->v7m.basepri[M_REG_NS];
2483         case 0x93: /* FAULTMASK_NS */
2484             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2485                 goto bad_reg;
2486             }
2487             if (!env->v7m.secure) {
2488                 return 0;
2489             }
2490             return env->v7m.faultmask[M_REG_NS];
2491         case 0x98: /* SP_NS */
2492         {
2493             /*
2494              * This gives the non-secure SP selected based on whether we're
2495              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2496              */
2497             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2498 
2499             if (!env->v7m.secure) {
2500                 return 0;
2501             }
2502             if (!arm_v7m_is_handler_mode(env) && spsel) {
2503                 return env->v7m.other_ss_psp;
2504             } else {
2505                 return env->v7m.other_ss_msp;
2506             }
2507         }
2508         default:
2509             break;
2510         }
2511     }
2512 
2513     switch (reg) {
2514     case 8: /* MSP */
2515         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2516     case 9: /* PSP */
2517         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2518     case 10: /* MSPLIM */
2519         if (!arm_feature(env, ARM_FEATURE_V8)) {
2520             goto bad_reg;
2521         }
2522         return env->v7m.msplim[env->v7m.secure];
2523     case 11: /* PSPLIM */
2524         if (!arm_feature(env, ARM_FEATURE_V8)) {
2525             goto bad_reg;
2526         }
2527         return env->v7m.psplim[env->v7m.secure];
2528     case 16: /* PRIMASK */
2529         return env->v7m.primask[env->v7m.secure];
2530     case 17: /* BASEPRI */
2531     case 18: /* BASEPRI_MAX */
2532         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2533             goto bad_reg;
2534         }
2535         return env->v7m.basepri[env->v7m.secure];
2536     case 19: /* FAULTMASK */
2537         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2538             goto bad_reg;
2539         }
2540         return env->v7m.faultmask[env->v7m.secure];
2541     default:
2542     bad_reg:
2543         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2544                                        " register %d\n", reg);
2545         return 0;
2546     }
2547 }
2548 
2549 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2550 {
2551     /*
2552      * We're passed bits [11..0] of the instruction; extract
2553      * SYSm and the mask bits.
2554      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2555      * we choose to treat them as if the mask bits were valid.
2556      * NB that the pseudocode 'mask' variable is bits [11..10],
2557      * whereas ours is [11..8].
2558      */
2559     uint32_t mask = extract32(maskreg, 8, 4);
2560     uint32_t reg = extract32(maskreg, 0, 8);
2561     int cur_el = arm_current_el(env);
2562 
2563     if (cur_el == 0 && reg > 7 && reg != 20) {
2564         /*
2565          * only xPSR sub-fields and CONTROL.SFPA may be written by
2566          * unprivileged code
2567          */
2568         return;
2569     }
2570 
2571     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2572         switch (reg) {
2573         case 0x88: /* MSP_NS */
2574             if (!env->v7m.secure) {
2575                 return;
2576             }
2577             env->v7m.other_ss_msp = val & ~3;
2578             return;
2579         case 0x89: /* PSP_NS */
2580             if (!env->v7m.secure) {
2581                 return;
2582             }
2583             env->v7m.other_ss_psp = val & ~3;
2584             return;
2585         case 0x8a: /* MSPLIM_NS */
2586             if (!env->v7m.secure) {
2587                 return;
2588             }
2589             env->v7m.msplim[M_REG_NS] = val & ~7;
2590             return;
2591         case 0x8b: /* PSPLIM_NS */
2592             if (!env->v7m.secure) {
2593                 return;
2594             }
2595             env->v7m.psplim[M_REG_NS] = val & ~7;
2596             return;
2597         case 0x90: /* PRIMASK_NS */
2598             if (!env->v7m.secure) {
2599                 return;
2600             }
2601             env->v7m.primask[M_REG_NS] = val & 1;
2602             return;
2603         case 0x91: /* BASEPRI_NS */
2604             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2605                 goto bad_reg;
2606             }
2607             if (!env->v7m.secure) {
2608                 return;
2609             }
2610             env->v7m.basepri[M_REG_NS] = val & 0xff;
2611             return;
2612         case 0x93: /* FAULTMASK_NS */
2613             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2614                 goto bad_reg;
2615             }
2616             if (!env->v7m.secure) {
2617                 return;
2618             }
2619             env->v7m.faultmask[M_REG_NS] = val & 1;
2620             return;
2621         case 0x94: /* CONTROL_NS */
2622             if (!env->v7m.secure) {
2623                 return;
2624             }
2625             write_v7m_control_spsel_for_secstate(env,
2626                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2627                                                  M_REG_NS);
2628             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2629                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2630                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2631             }
2632             /*
2633              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2634              * RES0 if the FPU is not present, and is stored in the S bank
2635              */
2636             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2637                 extract32(env->v7m.nsacr, 10, 1)) {
2638                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2639                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2640             }
2641             return;
2642         case 0x98: /* SP_NS */
2643         {
2644             /*
2645              * This gives the non-secure SP selected based on whether we're
2646              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2647              */
2648             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2649             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2650             uint32_t limit;
2651 
2652             if (!env->v7m.secure) {
2653                 return;
2654             }
2655 
2656             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2657 
2658             val &= ~0x3;
2659 
2660             if (val < limit) {
2661                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2662             }
2663 
2664             if (is_psp) {
2665                 env->v7m.other_ss_psp = val;
2666             } else {
2667                 env->v7m.other_ss_msp = val;
2668             }
2669             return;
2670         }
2671         default:
2672             break;
2673         }
2674     }
2675 
2676     switch (reg) {
2677     case 0 ... 7: /* xPSR sub-fields */
2678         v7m_msr_xpsr(env, mask, reg, val);
2679         break;
2680     case 8: /* MSP */
2681         if (v7m_using_psp(env)) {
2682             env->v7m.other_sp = val & ~3;
2683         } else {
2684             env->regs[13] = val & ~3;
2685         }
2686         break;
2687     case 9: /* PSP */
2688         if (v7m_using_psp(env)) {
2689             env->regs[13] = val & ~3;
2690         } else {
2691             env->v7m.other_sp = val & ~3;
2692         }
2693         break;
2694     case 10: /* MSPLIM */
2695         if (!arm_feature(env, ARM_FEATURE_V8)) {
2696             goto bad_reg;
2697         }
2698         env->v7m.msplim[env->v7m.secure] = val & ~7;
2699         break;
2700     case 11: /* PSPLIM */
2701         if (!arm_feature(env, ARM_FEATURE_V8)) {
2702             goto bad_reg;
2703         }
2704         env->v7m.psplim[env->v7m.secure] = val & ~7;
2705         break;
2706     case 16: /* PRIMASK */
2707         env->v7m.primask[env->v7m.secure] = val & 1;
2708         break;
2709     case 17: /* BASEPRI */
2710         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2711             goto bad_reg;
2712         }
2713         env->v7m.basepri[env->v7m.secure] = val & 0xff;
2714         break;
2715     case 18: /* BASEPRI_MAX */
2716         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2717             goto bad_reg;
2718         }
2719         val &= 0xff;
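        /*
         * BASEPRI_MAX only updates BASEPRI if it raises the masking
         * priority: the new value must be nonzero and either lower than
         * the current value or written while BASEPRI is 0 (disabled).
         */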
2720         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2721                          || env->v7m.basepri[env->v7m.secure] == 0)) {
2722             env->v7m.basepri[env->v7m.secure] = val;
2723         }
2724         break;
2725     case 19: /* FAULTMASK */
2726         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2727             goto bad_reg;
2728         }
2729         env->v7m.faultmask[env->v7m.secure] = val & 1;
2730         break;
2731     case 20: /* CONTROL */
2732         /*
2733          * Writing to the SPSEL bit only has an effect if we are in
2734          * thread mode; other bits can be updated by any privileged code.
2735          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2736          * env->v7m.control, so we only need to update the others.
2737          * For v7M, we must just ignore explicit writes to SPSEL in handler
2738          * mode; for v8M the write is permitted but will have no effect.
2739          * All these bits are writes-ignored from non-privileged code,
2740          * except for SFPA.
2741          */
2742         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2743                            !arm_v7m_is_handler_mode(env))) {
2744             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2745         }
2746         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2747             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2748             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2749         }
2750         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2751             /*
2752              * SFPA is RAZ/WI from NS or if no FPU.
2753              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2754              * Both are stored in the S bank.
2755              */
2756             if (env->v7m.secure) {
2757                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2758                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2759             }
2760             if (cur_el > 0 &&
2761                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2762                  extract32(env->v7m.nsacr, 10, 1))) {
2763                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2764                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2765             }
2766         }
2767         break;
2768     default:
2769     bad_reg:
2770         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2771                                        " register %d\n", reg);
2772         return;
2773     }
2774 }
2775 
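/*
 * The CONTROL case above packs several architectural rules into a few
 * conditionals. As a minimal sketch of just the SPSEL write-permission
 * rule (illustrative only; this helper and its parameters are
 * hypothetical, not part of this file):
 */
static inline bool sketch_spsel_write_taken(bool is_v8m, bool handler_mode,
                                            unsigned cur_el)
{
    /* Unprivileged writes to SPSEL are ignored entirely. */
    if (cur_el == 0) {
        return false;
    }
    /*
     * v7M ignores writes to SPSEL in Handler mode; v8M permits the
     * write, and write_v7m_control_spsel() then makes it a no-op
     * while in Handler mode.
     */
    return is_v8m || !handler_mode;
}
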
2776 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2777 {
2778     /* Implement the TT instruction. op is bits [7:6] of the insn. */
2779     bool forceunpriv = op & 1;
2780     bool alt = op & 2;
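    /* So op 0/1/2/3 correspond to the TT, TTT, TTA and TTAT encodings. */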
2781     V8M_SAttributes sattrs = {};
2782     uint32_t tt_resp;
2783     bool r, rw, nsr, nsrw, mrvalid;
2784     ARMMMUIdx mmu_idx;
2785     uint32_t mregion;
2786     bool targetpriv;
2787     bool targetsec = env->v7m.secure;
2788 
2789     /*
2790      * Work out which security state and privilege level we're
2791      * interested in...
2792      */
2793     if (alt) {
2794         targetsec = !targetsec;
2795     }
2796 
2797     if (forceunpriv) {
2798         targetpriv = false;
2799     } else {
2800         targetpriv = arm_v7m_is_handler_mode(env) ||
2801             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2802     }
2803 
2804     /* ...and then figure out which MMU index this is */
2805     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2806 
2807     /*
2808      * The MPU and SAU don't care about the access type for our
2809      * purposes, except that we must not claim to be an insn fetch;
2810      * so we arbitrarily call this a read.
2811      */
2812 
2813     /*
2814      * MPU region info only available for privileged or if
2815      * inspecting the other MPU state.
2816      */
2817     if (arm_current_el(env) != 0 || alt) {
2818         GetPhysAddrResult res = {};
2819         ARMMMUFaultInfo fi = {};
2820 
2821         /* We can ignore the return value as prot is always set */
2822         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
2823                           &res, &fi, &mregion);
2824         if (mregion == -1) {
2825             mrvalid = false;
2826             mregion = 0;
2827         } else {
2828             mrvalid = true;
2829         }
2830         r = res.f.prot & PAGE_READ;
2831         rw = res.f.prot & PAGE_WRITE;
2832     } else {
2833         r = false;
2834         rw = false;
2835         mrvalid = false;
2836         mregion = 0;
2837     }
2838 
2839     if (env->v7m.secure) {
2840         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2841                             targetsec, &sattrs);
2842         nsr = sattrs.ns && r;
2843         nsrw = sattrs.ns && rw;
2844     } else {
2845         sattrs.ns = true;
2846         nsr = false;
2847         nsrw = false;
2848     }
2849 
2850     tt_resp = (sattrs.iregion << 24) |
2851         (sattrs.irvalid << 23) |
2852         ((!sattrs.ns) << 22) |
2853         (nsrw << 21) |
2854         (nsr << 20) |
2855         (rw << 19) |
2856         (r << 18) |
2857         (sattrs.srvalid << 17) |
2858         (mrvalid << 16) |
2859         (sattrs.sregion << 8) |
2860         mregion;
2861 
2862     return tt_resp;
2863 }
2864 
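/*
 * For reference, the tt_resp word assembled above follows the
 * architected TT response layout. A minimal decoding sketch
 * (illustrative only; the struct and function names are hypothetical,
 * not part of this file):
 */
struct sketch_tt_resp {
    uint8_t mregion;  /* [7:0]   MPU region number */
    uint8_t sregion;  /* [15:8]  SAU region number */
    bool mrvalid;     /* [16]    MREGION field valid */
    bool srvalid;     /* [17]    SREGION field valid */
    bool r;           /* [18]    readable */
    bool rw;          /* [19]    readable and writable */
    bool nsr;         /* [20]    readable from Non-secure state */
    bool nsrw;        /* [21]    read/writable from Non-secure state */
    bool s;           /* [22]    address is Secure (== !sattrs.ns) */
    bool irvalid;     /* [23]    IREGION field valid */
    uint8_t iregion;  /* [31:24] IDAU region number */
};

static inline struct sketch_tt_resp sketch_decode_tt_resp(uint32_t tt_resp)
{
    struct sketch_tt_resp d = {
        .mregion = extract32(tt_resp, 0, 8),
        .sregion = extract32(tt_resp, 8, 8),
        .mrvalid = extract32(tt_resp, 16, 1),
        .srvalid = extract32(tt_resp, 17, 1),
        .r       = extract32(tt_resp, 18, 1),
        .rw      = extract32(tt_resp, 19, 1),
        .nsr     = extract32(tt_resp, 20, 1),
        .nsrw    = extract32(tt_resp, 21, 1),
        .s       = extract32(tt_resp, 22, 1),
        .irvalid = extract32(tt_resp, 23, 1),
        .iregion = extract32(tt_resp, 24, 8),
    };
    return d;
}
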
2865 #endif /* !CONFIG_USER_ONLY */
2866 
2867 uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
2868                              bool spsel)
2869 {
2870     /*
2871      * Return a pointer to the location where we currently store the
2872      * stack pointer for the requested security state and thread mode.
2873      * This pointer will become invalid if the CPU state is updated
2874      * such that the stack pointers are switched around (e.g. changing
2875      * the SPSEL control bit).
2876      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
2877      * Unlike that pseudocode, we require the caller to pass us in the
2878      * SPSEL control bit value; this is because we also use this
2879      * function in handling of pushing of the callee-saves registers
2880      * part of the v8M stack frame (pseudocode PushCalleeStack()),
2881      * and in the tailchain codepath the SPSEL bit comes from the exception
2882      * return magic LR value from the previous exception. The pseudocode
2883      * open-codes the stack selection in PushCalleeStack(), but we prefer
2884      * to make this utility function generic enough to do the job.
2885      */
2886     bool want_psp = threadmode && spsel;
2887 
2888     if (secure == env->v7m.secure) {
2889         if (want_psp == v7m_using_psp(env)) {
2890             return &env->regs[13];
2891         } else {
2892             return &env->v7m.other_sp;
2893         }
2894     } else {
2895         if (want_psp) {
2896             return &env->v7m.other_ss_psp;
2897         } else {
2898             return &env->v7m.other_ss_msp;
2899         }
2900     }
2901 }
2902
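/*
 * Usage sketch for arm_v7m_get_sp_ptr() (illustrative only; the
 * surrounding exception-entry code is elided): a caller on the
 * tail-chain path derives the arguments from the exception-return
 * magic LR value rather than from current CPU state, e.g.:
 *
 *     bool threadmode = !!(lr & R_V7M_EXCRET_MODE_MASK);
 *     bool spsel = !!(lr & R_V7M_EXCRET_SPSEL_MASK);
 *     uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, true,
 *                                               threadmode, spsel);
 *     uint32_t frameptr = *frame_sp_p;
 *
 * The pointer must be re-fetched after anything that can flip SPSEL
 * or switch security state, per the comment above.
 */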