xref: /qemu/target/arm/tcg/translate-m-nocp.c (revision 370ed600)
/*
 *  ARM translation: M-profile NOCP special-case instructions
 *
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "translate.h"
#include "translate-a32.h"

#include "decode-m-nocp.c.inc"

/*
 * The decode of VLLDM and VLSTM is nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }

    clear_eci_state(s);

    /*
     * End the TB, because we have updated FP control bits,
     * and possibly VPR or LTPSIZE.
     */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    tcg_gen_or_i32(sfpa, sfpa, aspen);
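    /* sfpa is now zero exactly when ASPEN == 1 && SFPA == 0, ie the NOP case */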
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label);

    if (s->fp_excp_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified the reglist in Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_constant_i64(0);
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
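    /* btmreg is now even: clear aligned Sreg pairs with 64-bit writes */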
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        store_cpu_field(tcg_constant_i32(0), v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}

/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;

static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}

static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
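    /* fpca is now zero exactly when fpInactive is true */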
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
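        /* Update only the NZCV bits of FPSCR, leaving the rest unchanged */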
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn_el()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
                            fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}

static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
                             bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return;
    }

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return NULL;
    }
    return load_reg(s, a->rt);
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}

static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

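    /* With neither a store nor writeback there are no side effects to emit */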
    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        offset = -offset;
    }

    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    }
    return value;
}

static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}

static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

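    /* FP uses both the cp10 and cp11 encodings and they are controlled together, so treat cp11 as cp10 */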
    if (a->cp == 11) {
        a->cp = 10;
    }
    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* In v8.1M, cp 8, 9, 14 and 15 are also governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized());
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}
769