/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

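/*
 * MRS/MSR access to the xPSR views: SYSm bit 0 selects whether the IPSR
 * is included, bit 1 whether the EPSR is included, and bit 2, when set,
 * excludes the APSR. The MSR mask bits (extracted from the instruction's
 * mask field) select which APSR fields are written: bit 3 = NZCVQ,
 * bit 2 = GE. The helpers below decode these encodings.
 */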
static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads others as zero.  */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
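 *
 * STACK_NORMAL: an ordinary exception-entry push; a fault pends a
 * derived exception.
 * STACK_IGNFAULTS: we have already taken a derived exception, so a
 * fault updates the fault status registers but pends nothing further.
 * STACK_LAZYFP: lazy FP state preservation; faults are recorded via the
 * LSPERR/MLSPERR status bits and pended through the lazy-FP code path.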
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
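        /*
         * FP frame layout: s0..s15 at FPCAR, FPSCR at FPCAR + 0x40
         * (followed by a reserved word), and, when FPCCR.TS is set,
         * s16..s31 at FPCAR + 0x48.
         */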
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
    }
    /*
     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
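    /* Set LR to the FNC_RETURN magic value (with bit 0 set) */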
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
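    /*
     * Callee-saves frame layout (0x28 bytes): the integrity signature at
     * [frameptr], a reserved word at [frameptr + 4], then r4..r11 at
     * [frameptr + 0x8] .. [frameptr + 0x24].
     */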
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!arm_feature(env, ARM_FEATURE_VFP)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

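    /*
     * The *RDY bits record whether, at the point the FP frame was
     * allocated, each relevant exception could be pended; they are
     * consulted if a fault occurs during the eventual lazy state
     * preservation.
     */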
    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);

        /*
         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    } else {
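        /* Lazy stacking enabled: just record where the state would go */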
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

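    /*
     * Choose the frame size: 0x20 for the basic integer frame, 0x68 for
     * the standard FP-extended frame, and 0xa8 when FPCCR.TS means
     * s16..s31 must be stacked as well.
     */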
    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
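                /*
                 * As in the pseudocode, clear the FP registers once they
                 * have been stacked, but only if the CPACR check passed.
                 */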
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

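    /* EXCRET.FType: set for a basic (no-FP) frame, clear for an extended one */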
    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /*
         * Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /*
         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }
1436 
1437     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1438     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1439     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1440         (excret & R_V7M_EXCRET_S_MASK);
1441 
1442     if (arm_feature(env, ARM_FEATURE_V8)) {
1443         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1444             /*
1445              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1446              * we choose to take the UsageFault.
1447              */
1448             if ((excret & R_V7M_EXCRET_S_MASK) ||
1449                 (excret & R_V7M_EXCRET_ES_MASK) ||
1450                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1451                 ufault = true;
1452             }
1453         }
1454         if (excret & R_V7M_EXCRET_RES0_MASK) {
1455             ufault = true;
1456         }
1457     } else {
1458         /* For v7M we only recognize certain combinations of the low bits */
1459         switch (excret & 0xf) {
1460         case 1: /* Return to Handler */
1461             break;
1462         case 13: /* Return to Thread using Process stack */
1463         case 9: /* Return to Thread using Main stack */
1464             /*
1465              * We only need to check NONBASETHRDENA for v7M, because in
1466              * v8M this bit does not exist (it is RES1).
1467              */
1468             if (!rettobase &&
1469                 !(env->v7m.ccr[env->v7m.secure] &
1470                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1471                 ufault = true;
1472             }
1473             break;
1474         default:
1475             ufault = true;
1476         }
1477     }
1478 
1479     /*
1480      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1481      * Handler mode (and will be until we write the new XPSR.Interrupt
1482      * field) this does not switch around the current stack pointer.
1483      * We must do this before we do any kind of tailchaining, including
1484      * for the derived exceptions on integrity check failures, or we will
1485      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1486      */
1487     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1488 
1489     /*
1490      * Clear scratch FP values left in caller saved registers; this
1491      * must happen before any kind of tail chaining.
1492      */
1493     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1494         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1495         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1496             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1497             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1498             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1499                           "stackframe: error during lazy state deactivation\n");
1500             v7m_exception_taken(cpu, excret, true, false);
1501             return;
1502         } else {
1503             /* Clear s0..s15 and FPSCR */
1504             int i;
1505 
1506             for (i = 0; i < 16; i += 2) {
1507                 *aa32_vfp_dreg(env, i / 2) = 0;
1508             }
1509             vfp_set_fpscr(env, 0);
1510         }
1511     }
1512 
1513     if (sfault) {
1514         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1515         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1516         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1517                       "stackframe: failed EXC_RETURN.ES validity check\n");
1518         v7m_exception_taken(cpu, excret, true, false);
1519         return;
1520     }
1521 
1522     if (ufault) {
1523         /*
1524          * Bad exception return: instead of popping the exception
1525          * stack, directly take a usage fault on the current stack.
1526          */
1527         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1528         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1529         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1530                       "stackframe: failed exception return integrity check\n");
1531         v7m_exception_taken(cpu, excret, true, false);
1532         return;
1533     }
1534 
1535     /*
1536      * Tailchaining: if there is currently a pending exception that
1537      * is high enough priority to preempt execution at the level we're
1538      * about to return to, then just directly take that exception now,
1539      * avoiding an unstack-and-then-stack. Note that now we have
1540      * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1541      * our current execution priority is already the execution priority we are
1542      * returning to -- none of the state we would unstack or set based on
1543      * the EXCRET value affects it.
1544      */
1545     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1546         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1547         v7m_exception_taken(cpu, excret, true, false);
1548         return;
1549     }
1550 
1551     switch_v7m_security_state(env, return_to_secure);
1552 
1553     {
1554         /*
1555          * The stack pointer we should be reading the exception frame from
1556          * depends on bits in the magic exception return type value (and
1557          * for v8M isn't necessarily the stack pointer we will eventually
1558          * end up resuming execution with). Get a pointer to the location
1559          * in the CPU state struct where the SP we need is currently being
1560          * stored; we will use and modify it in place.
1561          * We use this limited C variable scope so we don't accidentally
1562          * use 'frame_sp_p' after we do something that makes it invalid.
1563          */
1564         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1565                                               return_to_secure,
1566                                               !return_to_handler,
1567                                               return_to_sp_process);
1568         uint32_t frameptr = *frame_sp_p;
1569         bool pop_ok = true;
1570         ARMMMUIdx mmu_idx;
1571         bool return_to_priv = return_to_handler ||
1572             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1573 
1574         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1575                                                         return_to_priv);
1576 
1577         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1578             arm_feature(env, ARM_FEATURE_V8)) {
1579             qemu_log_mask(LOG_GUEST_ERROR,
1580                           "M profile exception return with non-8-aligned SP "
1581                           "for destination state is UNPREDICTABLE\n");
1582         }
1583 
1584         /* Do we need to pop callee-saved registers? */
1585         if (return_to_secure &&
1586             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1587              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1588             uint32_t actual_sig;
1589 
1590             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1591 
1592             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1593                 /* Take a SecureFault on the current stack */
1594                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1595                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1596                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1597                               "stackframe: failed exception return integrity "
1598                               "signature check\n");
1599                 v7m_exception_taken(cpu, excret, true, false);
1600                 return;
1601             }
1602 
1603             pop_ok = pop_ok &&
1604                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1605                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1606                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1607                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1608                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1609                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1610                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1611                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1612 
1613             frameptr += 0x28;
1614         }
1615 
1616         /* Pop registers */
1617         pop_ok = pop_ok &&
1618             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1619             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1620             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1621             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1622             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1623             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1624             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1625             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
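
        /*
         * For reference, the basic frame layout just consumed:
         *   frameptr + 0x00..0x0c : r0-r3
         *   frameptr + 0x10       : r12
         *   frameptr + 0x14       : lr (r14)
         *   frameptr + 0x18       : return pc
         *   frameptr + 0x1c       : xPSR
         * (The optional callee-saved extension above placed the integrity
         * signature at +0x00 and r4-r11 at +0x08..0x24 before this.)
         */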

        if (!pop_ok) {
            /*
             * v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /*
         * Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /*
                 * Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        if (!ftype) {
            /* FP present and we need to handle it */
            if (!return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...taking SecureFault on existing stackframe: "
                              "Secure LSPACT set but exception return is "
                              "not to secure state\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            restore_s16_s31 = return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);

            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                /* State in FPU is still valid, just clear LSPACT */
                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
            } else {
                int i;
                uint32_t fpscr;
                bool cpacr_pass, nsacr_pass;

                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
                                            return_to_priv);
                nsacr_pass = return_to_secure ||
                    extract32(env->v7m.nsacr, 10, 1);

                if (!cpacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            return_to_secure);
                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking UsageFault on existing "
                                  "stackframe: CPACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                } else if (!nsacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking Secure UsageFault on existing "
                                  "stackframe: NSACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                }

                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                    uint32_t slo, shi;
                    uint64_t dn;
                    uint32_t faddr = frameptr + 0x20 + 4 * i;

                    if (i >= 16) {
                        faddr += 8; /* Skip the slot for the FPSCR */
                    }

                    pop_ok = pop_ok &&
                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);

                    if (!pop_ok) {
                        break;
                    }

                    dn = (uint64_t)shi << 32 | slo;
                    *aa32_vfp_dreg(env, i / 2) = dn;
                }
                pop_ok = pop_ok &&
                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
                if (pop_ok) {
                    vfp_set_fpscr(env, fpscr);
                }
                if (!pop_ok) {
                    /*
                     * These regs are 0 if security extension present;
                     * otherwise merely UNKNOWN. We zero always.
                     */
                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            }
        }
        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, FPCA, !ftype);

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        if (!ftype) {
            frameptr += 0x48;
            if (restore_s16_s31) {
                frameptr += 0x40;
            }
        }
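
        /*
         * Frame size arithmetic: the basic frame is 8 words (0x20); an
         * FP frame adds s0-s15, the FPSCR and a reserved word (18 words,
         * 0x48), and restoring s16-s31 adds a further 16 words (0x40),
         * for a maximum frame of 0xa8 bytes.
         */
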
        /*
         * Undo stack alignment (the SPREALIGN bit indicates that the
         * original pre-exception SP was not 8-aligned and we added a
         * padding word to align it, so we undo this by ORing in the bit
         * that increases it from the current 8-aligned value to the
         * 8-unaligned value). (Adding 4 would work too but a logical OR
         * is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }

    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
        xpsr_mask &= ~XPSR_GE;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, xpsr_mask);

    if (env->v7m.secure) {
        bool sfpa = xpsr & XPSR_SFPA;

        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, SFPA, sfpa);
    }

    /*
     * The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /*
         * Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    arm_rebuild_hflags(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
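
/*
 * Illustrative sketch, not part of the build: decoding the EXC_RETURN
 * bits the exit path above consumes, using the architectural bit
 * positions. The helper name is hypothetical; the real code uses the
 * R_V7M_EXCRET_* FIELD macros rather than open-coded masks.
 */
#if 0
static void excret_describe(uint32_t excret)
{
    bool ftype = excret & (1 << 4); /* 1: standard frame, 0: FP frame */
    bool mode = excret & (1 << 3);  /* 1: return to Thread mode */
    bool spsel = excret & (1 << 2); /* 1: return using the process stack */

    qemu_log("EXC_RETURN 0x%08" PRIx32 ": to %s mode, %s stack, %s frame\n",
             excret, mode ? "Thread" : "Handler",
             spsel ? "process" : "main", ftype ? "standard" : "FP");
}
#endif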

static bool do_v7m_function_return(ARMCPU *cpu)
{
    /*
     * v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;
    arm_rebuild_hflags(env);

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
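
/*
 * For reference, the secure stack frame consumed by
 * do_v7m_function_return() is just two words: the real return address
 * at frameptr + 0 and the partial saved xPSR (giving us the IPSR to
 * check and the SFPA bit) at frameptr + 4.
 */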

static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /*
     * Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /*
         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}

static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /*
     * Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /*
     * OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    arm_rebuild_hflags(env);
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
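
/*
 * Worked example: a NonSecure branch to a secure entry point succeeds
 * only if the first 32-bit instruction there is SG (0xe97fe97f, so each
 * halfword reads as 0xe97f) and both halves lie in S&NSC memory;
 * anything else lands in gen_invep above and pends a SecureFault with
 * SFSR.INVEP.
 */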

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /*
     * For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
    {
        /*
         * NOCP might be directed to something other than the current
         * security state if this fault is because of NSACR; we indicate
         * the target security state using exception.target_el.
         */
        int target_secstate;

        if (env->exception.target_el == 3) {
            target_secstate = M_REG_S;
        } else {
            target_secstate = env->v7m.secure;
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
        break;
    }
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_LSERR:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
        break;
    case EXCP_UNALIGNED:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /*
             * Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /*
             * Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /*
             * All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return;
    case EXCP_BKPT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    case EXCP_LAZYFP:
        /*
         * We already pended the specific exception in the NVIC in the
         * v7m_preserve_fp_state() helper function.
         */
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK;
        /*
         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        lr |= R_V7M_EXCRET_FTYPE_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
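
/*
 * Worked example (v7M, no FP context active): lr above starts as
 * RES1|S|DCRS|ES = 0xffffffe1, gains FTYPE (0x10) because FPCA is
 * clear, and MODE (0x08) if we were in Thread mode, yielding the
 * familiar 0xfffffff1/0xfffffff9/0xfffffffd EXC_RETURN values
 * (0xfffffffd also needs SPSEL from CONTROL_NS).
 */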

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, el);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, env->v7m.secure);
    case 0x94: /* CONTROL_NS */
        /*
         * We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS] |
            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
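
/*
 * For reference, the SYSm encodings handled above: 0-7 are the xPSR
 * views, 8/9 MSP/PSP, 10/11 MSPLIM/PSPLIM (v8M only), 16 PRIMASK,
 * 17/18 BASEPRI/BASEPRI_MAX, 19 FAULTMASK, 20 CONTROL, and 0x88..0x98
 * the Secure-only *_NS aliases of the NonSecure banked versions.
 */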

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /*
     * We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);
    int cur_el = arm_current_el(env);

    if (cur_el == 0 && reg > 7 && reg != 20) {
        /*
         * only xPSR sub-fields and CONTROL.SFPA may be written by
         * unprivileged code
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            /*
             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
             * RES0 if the FPU is not present, and is stored in the S bank
             */
            if (arm_feature(env, ARM_FEATURE_VFP) &&
                extract32(env->v7m.nsacr, 10, 1)) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[M_REG_NS]
                           : env->v7m.msplim[M_REG_NS];

            if (val < limit) {
                CPUState *cs = env_cpu(env);

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         * All these bits are writes-ignored from non-privileged code,
         * except for SFPA.
         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
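
/*
 * Worked example: "MSR APSR_nzcvq, r0" arrives with insn bit [11] set,
 * so extract32(maskreg, 8, 4) above gives mask = 8 and v7m_msr_xpsr()
 * updates only NZCV and Q; the _g suffix sets bit [10] too (mask = 0xc),
 * additionally updating the GE bits on CPUs with the DSP extension.
 */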

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /*
     * Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
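
/*
 * For reference, op here encodes the four TT variants: TT (op = 0),
 * TTT (op = 1, force an unprivileged lookup), TTA (op = 2, query the
 * other security state) and TTAT (op = 3, both); architecturally the
 * A forms are only available to Secure code.
 */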

#endif /* !CONFIG_USER_ONLY */

ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
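
/*
 * Worked example: a privileged Secure lookup while execution priority
 * is negative (e.g. FAULTMASK set) yields
 * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_NEGPRI | ARM_MMU_IDX_M_S.
 */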

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}