/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
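    /*
     * maskreg packs the MSR mask field (bits [11:8]) together with the
     * SYSm register number (bits [7:0]) into a single helper argument.
     */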
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads others as zero.  */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;
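
/*
 * STACK_NORMAL: a faulting write pends a derived exception via
 * armv7m_nvic_set_pending_derived().
 * STACK_IGNFAULTS: a faulting write updates the fault status registers
 * but pends nothing (used when a derived exception is already pending).
 * STACK_LAZYFP: lazy FP state preservation; faults pend via
 * armv7m_nvic_set_pending_lazyfp().
 */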

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
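    /* For M profile the MMU index encodes the security state (M_S bit) */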
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
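    /* FPCCR_S.TS set means s16..s31 must also be stacked and then cleared */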
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
    }
    /*
     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
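    /* LR gets the FNC_RETURN magic value; the eventual BX back to it traps */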
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
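    /* 0xfefa125a / 0xfefa125b are the architected integrity signature values */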
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

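    /* 0x28 bytes: integrity signature, a reserved word, then r4-r11 */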
    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);

        /*
         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

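    /*
     * Frame sizes: 0x20 for the basic 8-word frame; 0x68 adds s0-s15,
     * the FPSCR and a reserved word; 0xa8 also covers s16-s31 (Secure
     * with FPCCR_S.TS set).
     */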
    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
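    /* The basic frame is r0-r3, r12, lr, the return address and xPSR */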
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }
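
    /*
     * EXC_RETURN bit layout, per the R_V7M_EXCRET_* fields used below:
     * 0xff prefix in bits [31:24], S (bit 6), DCRS (bit 5), FTYPE (bit 4),
     * MODE (bit 3), SPSEL (bit 2), ES (bit 0).
     */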

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /*
         * Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /*
         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }
1437 
1438     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1439     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1440     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1441         (excret & R_V7M_EXCRET_S_MASK);
1442 
1443     if (arm_feature(env, ARM_FEATURE_V8)) {
1444         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1445             /*
1446              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1447              * we choose to take the UsageFault.
1448              */
1449             if ((excret & R_V7M_EXCRET_S_MASK) ||
1450                 (excret & R_V7M_EXCRET_ES_MASK) ||
1451                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1452                 ufault = true;
1453             }
1454         }
1455         if (excret & R_V7M_EXCRET_RES0_MASK) {
1456             ufault = true;
1457         }
1458     } else {
1459         /* For v7M we only recognize certain combinations of the low bits */
1460         switch (excret & 0xf) {
1461         case 1: /* Return to Handler */
1462             break;
1463         case 13: /* Return to Thread using Process stack */
1464         case 9: /* Return to Thread using Main stack */
1465             /*
1466              * We only need to check NONBASETHRDENA for v7M, because in
1467              * v8M this bit does not exist (it is RES1).
1468              */
1469             if (!rettobase &&
1470                 !(env->v7m.ccr[env->v7m.secure] &
1471                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1472                 ufault = true;
1473             }
1474             break;
1475         default:
1476             ufault = true;
1477         }
1478     }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    /*
     * Clear scratch FP values left in caller saved registers; this
     * must happen before any kind of tail chaining.
     */
    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                          "stackframe: error during lazy state deactivation\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        } else {
            /* Clear s0..s15 and FPSCR */
            int i;

            for (i = 0; i < 16; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /*
         * Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now that we have
     * deactivated the previous exception by calling
     * armv7m_nvic_complete_irq(), our current execution priority is
     * already the execution priority we are returning to -- none of the
     * state we would unstack or set based on the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /*
         * The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
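        /*
         * If so, the v8M callee-saved extension frame is 0x28 bytes:
         * the integrity signature at [frameptr], a reserved word at
         * [frameptr + 0x4], and r4-r11 at [frameptr + 0x8] through
         * [frameptr + 0x24].
         */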
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
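        /*
         * The basic hardware frame holds r0-r3 at [frameptr] through
         * [frameptr + 0xc], then r12, lr, the return PC and xPSR at
         * [frameptr + 0x10] through [frameptr + 0x1c].
         */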
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /*
             * v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /*
         * Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so we ignore the bit on v7M as well,
         * but complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /*
                 * Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        if (!ftype) {
            /* FP present and we need to handle it */
            if (!return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...taking SecureFault on existing stackframe: "
                              "Secure LSPACT set but exception return is "
                              "not to secure state\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            restore_s16_s31 = return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);

            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                /* State in FPU is still valid, just clear LSPACT */
                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
            } else {
                int i;
                uint32_t fpscr;
                bool cpacr_pass, nsacr_pass;

                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
                                            return_to_priv);
                nsacr_pass = return_to_secure ||
                    extract32(env->v7m.nsacr, 10, 1);

                if (!cpacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            return_to_secure);
                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking UsageFault on existing "
                                  "stackframe: CPACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                } else if (!nsacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking Secure UsageFault on existing "
                                  "stackframe: NSACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                }

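                /*
                 * FP extension frame: s0-s15 at [frameptr + 0x20],
                 * FPSCR at [frameptr + 0x60], and (when FPCCR.TS is
                 * set) s16-s31 following at [frameptr + 0x68].
                 */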
                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                    uint32_t slo, shi;
                    uint64_t dn;
                    uint32_t faddr = frameptr + 0x20 + 4 * i;

                    if (i >= 16) {
                        faddr += 8; /* Skip the slot for the FPSCR */
                    }

                    pop_ok = pop_ok &&
                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);

                    if (!pop_ok) {
                        break;
                    }

                    dn = (uint64_t)shi << 32 | slo;
                    *aa32_vfp_dreg(env, i / 2) = dn;
                }
                pop_ok = pop_ok &&
                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
                if (pop_ok) {
                    vfp_set_fpscr(env, fpscr);
                }
                if (!pop_ok) {
                    /*
                     * These regs are 0 if the security extension is
                     * present; otherwise they are merely UNKNOWN. We
                     * always zero them.
                     */
                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            }
        }
        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, FPCA, !ftype);

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        if (!ftype) {
            frameptr += 0x48;
            if (restore_s16_s31) {
                frameptr += 0x40;
            }
        }
        /*
         * Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value).
         * (Adding 4 would work too, but a logical OR is how the pseudocode
         * specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }

    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
        xpsr_mask &= ~XPSR_GE;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, xpsr_mask);

    if (env->v7m.secure) {
        bool sfpa = xpsr & XPSR_SFPA;

        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, SFPA, sfpa);
    }

    /*
     * The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /*
         * Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    arm_rebuild_hflags(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}

static bool do_v7m_function_return(ARMCPU *cpu)
{
    /*
     * v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

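        /*
         * The FNC_RETURN stack frame is two words: the true return
         * address at [frameptr] and a partial xPSR at [frameptr + 4].
         */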
        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;
    arm_rebuild_hflags(env);

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}

static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /*
     * Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /*
         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}

static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /*
     * Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /*
     * OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    arm_rebuild_hflags(env);
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /*
     * For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
    {
        /*
         * NOCP might be directed to something other than the current
         * security state if this fault is because of NSACR; we indicate
         * the target security state using exception.target_el.
         */
        int target_secstate;

        if (env->exception.target_el == 3) {
            target_secstate = M_REG_S;
        } else {
            target_secstate = env->v7m.secure;
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
        break;
    }
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_LSERR:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
        break;
    case EXCP_UNALIGNED:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /*
             * Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /*
             * Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /*
             * All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        env->regs[15] += env->thumb ? 2 : 4;
        return;
    case EXCP_BKPT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
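        /*
         * Magic PC values below EXC_RETURN_MIN_MAGIC but at or above
         * FNC_RETURN_MIN_MAGIC are v8M secure function returns rather
         * than exception returns.
         */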
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    case EXCP_LAZYFP:
        /*
         * We already pended the specific exception in the NVIC in the
         * v7m_preserve_fp_state() helper function.
         */
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK;
        /*
         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        lr |= R_V7M_EXCRET_FTYPE_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged code can read */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, el);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, env->v7m.secure);
    case 0x94: /* CONTROL_NS */
        /*
         * We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS] |
            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

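    /*
     * SYSm values with bit 7 set (0x88 and up) are the _NS register
     * aliases; they read as zero unless the CPU is currently Secure.
     */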
    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /*
     * We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);
    int cur_el = arm_current_el(env);

    if (cur_el == 0 && reg > 7 && reg != 20) {
        /*
         * Only the xPSR sub-fields and CONTROL.SFPA may be written by
         * unprivileged code
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            /*
             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
             * RES0 if the FPU is not present, and is stored in the S bank
             */
            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
                extract32(env->v7m.nsacr, 10, 1)) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[M_REG_NS]
                           : env->v7m.msplim[M_REG_NS];

            if (val < limit) {
                CPUState *cs = env_cpu(env);

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
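    /*
     * The stack limit registers ignore writes to bits [2:0], so the
     * stored MSPLIM and PSPLIM values are always 8-byte aligned.
     */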
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         * All these bits are writes-ignored from non-privileged code,
         * except for SFPA.
         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /*
     * Work out which security state and privilege level we're
     * interested in...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * The MPU and SAU don't care about the access type for our purposes
     * beyond that we don't want to claim to be an insn fetch, so we
     * arbitrarily call this a read.
     */

    /*
     * MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

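    /*
     * Assemble the TT response word: MREGION in bits [7:0], SREGION in
     * [15:8], then the MRVALID, SRVALID, R, RW, NSR, NSRW, S and
     * IRVALID flags in bits [16] to [23], and IREGION in bits [31:24].
     */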
    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

#endif /* !CONFIG_USER_ONLY */

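/*
 * The M-profile MMU index encodes the Secure state, privilege level
 * and negative-priority (HardFault/NMI) status as independent flag
 * bits on top of the base index, so each combination gets a distinct
 * translation regime.
 */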
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}