/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0487C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}
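
/*
 * Informative example of the TGE redirect above: an exception raised from
 * NS EL0 with target_el == 1 (e.g. an SVC) while HCR_EL2.TGE is set is
 * delivered to EL2 with its original syndrome; only EC_ADVSIMDFPACCESSTRAP
 * syndromes are rewritten to "uncategorized".
 */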

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}
71 
HELPER(neon_tbl)72 uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
73                           uint32_t maxindex)
74 {
75     uint32_t val, shift;
76     uint64_t *table = vn;
77 
78     val = 0;
79     for (shift = 0; shift < 32; shift += 8) {
80         uint32_t index = (ireg >> shift) & 0xff;
81         if (index < maxindex) {
82             uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
83             val |= tmp << shift;
84         } else {
85             val |= def & (0xff << shift);
86         }
87     }
88     return val;
89 }
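
/*
 * Worked example: with maxindex = 8 and a table whose byte i holds the
 * value i, ireg = 0x00ff0201 yields table bytes for lanes 0, 1 and 3
 * (indices 0x01, 0x02, 0x00), while lane 2 (index 0xff >= maxindex) is
 * filled from the corresponding byte of 'def'.
 */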

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
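
/*
 * Informative example: if the active stack limit register holds 0x20001000
 * and translated code tries to update SP to 0x20000ff8, the check fails
 * and EXCP_STKOF is raised (reported to the guest as a v8M STKOF
 * UsageFault).
 */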

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
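
/*
 * Worked example of the signed overflow test used above: with
 * a = 0x7fffffff and b = 1, res = 0x80000000, so (res ^ a) has the sign
 * bit set while (a ^ b) does not: signed overflow. The saturated result
 * ~(((int32_t)a >> 31) ^ SIGNBIT) evaluates to 0x7fffffff for positive a
 * and 0x80000000 for negative a.
 */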

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
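
/*
 * Worked example: do_ssat(env, 300, 7) saturates to the signed 8-bit
 * range: top = 300 >> 7 = 2 > 0, so QF is set and the result is
 * mask = (1 << 7) - 1 = 127. Similarly, do_usat(env, 300, 8) clamps to
 * max = (1 << 8) - 1 = 255 and sets QF, while do_usat(env, -5, 8)
 * clamps to 0.
 */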

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
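
/*
 * Example of the priority ordering above: at NS EL0 with SCTLR_EL1.nTWI
 * clear, a WFI traps to EL1 even if HCR_EL2.TWI and SCR_EL3.TWI are also
 * set, because the EL1 check is performed first.
 */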

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
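
/*
 * Example of the masking above: returning to Thumb state clears only
 * bit 0 of the new PC (e.g. 0x8001 -> 0x8000), while returning to ARM
 * state clears bits [1:0] (e.g. 0x8003 -> 0x8000).
 */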

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
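
/*
 * Example of the banking logic above: get_user_reg(env, 8) in FIQ mode
 * returns the User/System copy from usr_regs[0] rather than the live
 * FIQ-banked r8; in any other mode r8 is not banked, so env->regs[8] is
 * returned directly.
 */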

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
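
/*
 * Example of a case caught here: an MRS reading SP_usr (tgtmode
 * ARM_CPU_MODE_USR, regno 13) executed from System mode is UNPREDICTABLE,
 * so we take the EXCP_UDEF path above.
 */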

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
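
/*
 * Worked example: shl_cc(env, 0x80000001, 1) sets CF from the last bit
 * shifted out (bit 31, so CF = 1) and returns 0x00000002. In ror_cc, a
 * rotate amount that is a non-zero multiple of 32 leaves the value intact
 * but still updates CF from bit 31.
 */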

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
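
    /*
     * Example: with cpu->dcz_blocksize = 4, blocklen is 4 << 4 = 64 bytes,
     * so vaddr_in = 0x1234 is aligned down to vaddr = 0x1200 and bytes
     * 0x1200..0x123f are zeroed.
     */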

#ifndef CONFIG_USER_ONLY
    {
        /*
         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}