xref: /qemu/target/arm/tcg/translate.c (revision 09a52d85)
1 /*
2  *  ARM translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *  Copyright (c) 2005-2007 CodeSourcery
6  *  Copyright (c) 2007 OpenedHand, Ltd.
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20  */
21 #include "qemu/osdep.h"
22 
23 #include "translate.h"
24 #include "translate-a32.h"
25 #include "qemu/log.h"
26 #include "arm_ldst.h"
27 #include "semihosting/semihost.h"
28 #include "cpregs.h"
29 #include "exec/helper-proto.h"
30 
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef  HELPER_H
34 
35 #define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
36 #define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
37 /* currently all emulated v5 cores are also v5TE, so don't bother */
38 #define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
39 #define ENABLE_ARCH_5J    dc_isar_feature(aa32_jazelle, s)
40 #define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
41 #define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
42 #define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
43 #define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
44 #define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)
45 
46 /* These are TCG temporaries used only by the legacy iwMMXt decoder */
47 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
48 /* These are TCG globals which alias CPUARMState fields */
49 static TCGv_i32 cpu_R[16];
50 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
51 TCGv_i64 cpu_exclusive_addr;
52 TCGv_i64 cpu_exclusive_val;
53 
54 static const char * const regnames[] =
55     { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
56       "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
57 
58 
59 /* initialize TCG globals.  */
60 void arm_translate_init(void)
61 {
62     int i;
63 
64     for (i = 0; i < 16; i++) {
65         cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
66                                           offsetof(CPUARMState, regs[i]),
67                                           regnames[i]);
68     }
69     cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
70     cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
71     cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
72     cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");
73 
74     cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
75         offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
76     cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
77         offsetof(CPUARMState, exclusive_val), "exclusive_val");
78 
79     a64_translate_init();
80 }
81 
82 uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
83 {
84     /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
85     switch (cmode) {
86     case 0: case 1:
87         /* no-op */
88         break;
89     case 2: case 3:
90         imm <<= 8;
91         break;
92     case 4: case 5:
93         imm <<= 16;
94         break;
95     case 6: case 7:
96         imm <<= 24;
97         break;
98     case 8: case 9:
99         imm |= imm << 16;
100         break;
101     case 10: case 11:
102         imm = (imm << 8) | (imm << 24);
103         break;
104     case 12:
105         imm = (imm << 8) | 0xff;
106         break;
107     case 13:
108         imm = (imm << 16) | 0xffff;
109         break;
110     case 14:
111         if (op) {
112             /*
113              * This and cmode == 15 op == 1 are the only cases where
114              * the top and bottom 32 bits of the encoded constant differ.
115              */
116             uint64_t imm64 = 0;
117             int n;
118 
119             for (n = 0; n < 8; n++) {
120                 if (imm & (1 << n)) {
121                     imm64 |= (0xffULL << (n * 8));
122                 }
123             }
124             return imm64;
125         }
126         imm |= (imm << 8) | (imm << 16) | (imm << 24);
127         break;
128     case 15:
129         if (op) {
130             /* Reserved encoding for AArch32; valid for AArch64 */
131             uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
132             if (imm & 0x80) {
133                 imm64 |= 0x8000000000000000ULL;
134             }
135             if (imm & 0x40) {
136                 imm64 |= 0x3fc0000000000000ULL;
137             } else {
138                 imm64 |= 0x4000000000000000ULL;
139             }
140             return imm64;
141         }
142         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
143             | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
144         break;
145     }
146     if (op) {
147         imm = ~imm;
148     }
149     return dup_const(MO_32, imm);
150 }
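/*
 * Worked example (added note, not in the original source): cmode=12,
 * op=0, imm=0xab yields imm = (0xab << 8) | 0xff = 0xabff, which is
 * not inverted and is then replicated across both 32-bit halves, so
 * the function returns 0x0000abff0000abffULL.
 */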
151 
152 /* Generate a label used for skipping this instruction */
153 void arm_gen_condlabel(DisasContext *s)
154 {
155     if (!s->condjmp) {
156         s->condlabel = gen_disas_label(s);
157         s->condjmp = 1;
158     }
159 }
160 
161 /* Flags for the disas_set_da_iss info argument:
162  * lower bits hold the Rt register number, higher bits are flags.
163  */
164 typedef enum ISSInfo {
165     ISSNone = 0,
166     ISSRegMask = 0x1f,
167     ISSInvalid = (1 << 5),
168     ISSIsAcqRel = (1 << 6),
169     ISSIsWrite = (1 << 7),
170     ISSIs16Bit = (1 << 8),
171 } ISSInfo;
172 
173 /*
174  * Store var into env + offset to a member with size bytes.
175  * Free var after use.
176  */
177 void store_cpu_offset(TCGv_i32 var, int offset, int size)
178 {
179     switch (size) {
180     case 1:
181         tcg_gen_st8_i32(var, tcg_env, offset);
182         break;
183     case 4:
184         tcg_gen_st_i32(var, tcg_env, offset);
185         break;
186     default:
187         g_assert_not_reached();
188     }
189 }
190 
191 /* Save the syndrome information for a Data Abort */
192 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
193 {
194     uint32_t syn;
195     int sas = memop & MO_SIZE;
196     bool sse = memop & MO_SIGN;
197     bool is_acqrel = issinfo & ISSIsAcqRel;
198     bool is_write = issinfo & ISSIsWrite;
199     bool is_16bit = issinfo & ISSIs16Bit;
200     int srt = issinfo & ISSRegMask;
201 
202     if (issinfo & ISSInvalid) {
203         /* Some callsites want to conditionally provide ISS info,
204          * eg "only if this was not a writeback"
205          */
206         return;
207     }
208 
209     if (srt == 15) {
210         /* For AArch32, insns where the src/dest is R15 never generate
211          * ISS information. Catching that here saves checking at all
212          * the call sites.
213          */
214         return;
215     }
216 
217     syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
218                                   0, 0, 0, is_write, 0, is_16bit);
219     disas_set_insn_syndrome(s, syn);
220 }
221 
222 static inline int get_a32_user_mem_index(DisasContext *s)
223 {
224     /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
225      * insns:
226      *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
227      *  otherwise, access as if at PL0.
228      */
229     switch (s->mmu_idx) {
230     case ARMMMUIdx_E3:
231     case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
232     case ARMMMUIdx_E10_0:
233     case ARMMMUIdx_E10_1:
234     case ARMMMUIdx_E10_1_PAN:
235         return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
236     case ARMMMUIdx_MUser:
237     case ARMMMUIdx_MPriv:
238         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
239     case ARMMMUIdx_MUserNegPri:
240     case ARMMMUIdx_MPrivNegPri:
241         return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
242     case ARMMMUIdx_MSUser:
243     case ARMMMUIdx_MSPriv:
244         return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
245     case ARMMMUIdx_MSUserNegPri:
246     case ARMMMUIdx_MSPrivNegPri:
247         return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
248     default:
249         g_assert_not_reached();
250     }
251 }
252 
253 /* The pc_curr difference for an architectural jump (the PC reads as the
254  * insn address + 8 in ARM state and + 4 in Thumb state). */
254 static target_long jmp_diff(DisasContext *s, target_long diff)
255 {
256     return diff + (s->thumb ? 4 : 8);
257 }
258 
259 static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
260 {
261     assert(s->pc_save != -1);
262     if (tb_cflags(s->base.tb) & CF_PCREL) {
263         tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
264     } else {
265         tcg_gen_movi_i32(var, s->pc_curr + diff);
266     }
267 }
268 
269 /* Set a variable to the value of a CPU register.  */
270 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
271 {
272     if (reg == 15) {
273         gen_pc_plus_diff(s, var, jmp_diff(s, 0));
274     } else {
275         tcg_gen_mov_i32(var, cpu_R[reg]);
276     }
277 }
278 
279 /*
280  * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
281  * This is used for load/store for which use of PC implies (literal),
282  * or ADD that implies ADR.
283  */
284 TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
285 {
286     TCGv_i32 tmp = tcg_temp_new_i32();
287 
288     if (reg == 15) {
289         /*
290          * This address is computed from an aligned PC:
291          * subtract off the low bits.
292          */
293         gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
294     } else {
295         tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
296     }
297     return tmp;
298 }
299 
300 /* Set a CPU register.  The source must be a temporary and will be
301    marked as dead.  */
302 void store_reg(DisasContext *s, int reg, TCGv_i32 var)
303 {
304     if (reg == 15) {
305         /* In Thumb mode, we must ignore bit 0.
306          * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
307          * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
308          * We choose to ignore [1:0] in ARM mode for all architecture versions.
309          */
310         tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
311         s->base.is_jmp = DISAS_JUMP;
312         s->pc_save = -1;
313     } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
314         /* For M-profile SP bits [1:0] are always zero */
315         tcg_gen_andi_i32(var, var, ~3);
316     }
317     tcg_gen_mov_i32(cpu_R[reg], var);
318 }
319 
320 /*
321  * Variant of store_reg which applies v8M stack-limit checks before updating
322  * SP. If the check fails this will result in an exception being taken.
323  * We disable the stack checks for CONFIG_USER_ONLY because we have
324  * no idea what the stack limits should be in that case.
325  * If stack checking is not being done this just acts like store_reg().
326  */
327 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
328 {
329 #ifndef CONFIG_USER_ONLY
330     if (s->v8m_stackcheck) {
331         gen_helper_v8m_stackcheck(tcg_env, var);
332     }
333 #endif
334     store_reg(s, 13, var);
335 }
336 
337 /* Value extensions.  */
338 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
339 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
340 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
341 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
342 
343 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
344 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
345 
346 void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
347 {
348     gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
349 }
350 
351 static void gen_rebuild_hflags(DisasContext *s, bool new_el)
352 {
353     bool m_profile = arm_dc_feature(s, ARM_FEATURE_M);
354 
355     if (new_el) {
356         if (m_profile) {
357             gen_helper_rebuild_hflags_m32_newel(tcg_env);
358         } else {
359             gen_helper_rebuild_hflags_a32_newel(tcg_env);
360         }
361     } else {
362         TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
363         if (m_profile) {
364             gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
365         } else {
366             gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
367         }
368     }
369 }
370 
371 static void gen_exception_internal(int excp)
372 {
373     assert(excp_is_internal(excp));
374     gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
375 }
376 
377 static void gen_singlestep_exception(DisasContext *s)
378 {
379     /* We just completed step of an insn. Move from Active-not-pending
380      * to Active-pending, and then also take the swstep exception.
381      * This corresponds to making the (IMPDEF) choice to prioritize
382      * swstep exceptions over asynchronous exceptions taken to an exception
383      * level where debug is disabled. This choice has the advantage that
384      * we do not need to maintain internal state corresponding to the
385      * ISV/EX syndrome bits between completion of the step and generation
386      * of the exception, and our syndrome information is always correct.
387      */
388     gen_ss_advance(s);
389     gen_swstep_exception(s, 1, s->is_ldex);
390     s->base.is_jmp = DISAS_NORETURN;
391 }
392 
393 void clear_eci_state(DisasContext *s)
394 {
395     /*
396      * Clear any ECI/ICI state: used when a load multiple/store
397      * multiple insn executes.
398      */
399     if (s->eci) {
400         store_cpu_field_constant(0, condexec_bits);
401         s->eci = 0;
402     }
403 }
404 
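/*
 * Added note: signed dual 16x16 multiply. On return, a holds the product
 * of the low halfwords of the two inputs and b holds the product of the
 * high halfwords; both inputs are clobbered.
 */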
405 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
406 {
407     TCGv_i32 tmp1 = tcg_temp_new_i32();
408     TCGv_i32 tmp2 = tcg_temp_new_i32();
409     tcg_gen_ext16s_i32(tmp1, a);
410     tcg_gen_ext16s_i32(tmp2, b);
411     tcg_gen_mul_i32(tmp1, tmp1, tmp2);
412     tcg_gen_sari_i32(a, a, 16);
413     tcg_gen_sari_i32(b, b, 16);
414     tcg_gen_mul_i32(b, b, a);
415     tcg_gen_mov_i32(a, tmp1);
416 }
417 
418 /* Byteswap each halfword.  */
419 void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
420 {
421     TCGv_i32 tmp = tcg_temp_new_i32();
422     TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
423     tcg_gen_shri_i32(tmp, var, 8);
424     tcg_gen_and_i32(tmp, tmp, mask);
425     tcg_gen_and_i32(var, var, mask);
426     tcg_gen_shli_i32(var, var, 8);
427     tcg_gen_or_i32(dest, var, tmp);
428 }
429 
430 /* Byteswap low halfword and sign extend.  */
431 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
432 {
433     tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
434 }
435 
436 /* Dual 16-bit add.  Result placed in dest; t0 and t1 are clobbered.
437     tmp = (t0 ^ t1) & 0x8000;
438     t0 &= ~0x8000;
439     t1 &= ~0x8000;
440     t0 = (t0 + t1) ^ tmp;
441  */
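/*
 * Worked example (added note): t0 = 0x00018000, t1 = 0x00018000.
 * tmp is 0 and the masked sum is 0x00020000, so the low halfwords add
 * to 0x0000 and the high halfwords to 0x0002, with no carry crossing
 * the halfword boundary.
 */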
442 
443 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
444 {
445     TCGv_i32 tmp = tcg_temp_new_i32();
446     tcg_gen_xor_i32(tmp, t0, t1);
447     tcg_gen_andi_i32(tmp, tmp, 0x8000);
448     tcg_gen_andi_i32(t0, t0, ~0x8000);
449     tcg_gen_andi_i32(t1, t1, ~0x8000);
450     tcg_gen_add_i32(t0, t0, t1);
451     tcg_gen_xor_i32(dest, t0, tmp);
452 }
453 
454 /* Set N and Z flags from var.  */
455 static inline void gen_logic_CC(TCGv_i32 var)
456 {
457     tcg_gen_mov_i32(cpu_NF, var);
458     tcg_gen_mov_i32(cpu_ZF, var);
459 }
460 
461 /* dest = T0 + T1 + CF. */
462 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
463 {
464     tcg_gen_add_i32(dest, t0, t1);
465     tcg_gen_add_i32(dest, dest, cpu_CF);
466 }
467 
468 /* dest = T0 - T1 + CF - 1.  */
469 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
470 {
471     tcg_gen_sub_i32(dest, t0, t1);
472     tcg_gen_add_i32(dest, dest, cpu_CF);
473     tcg_gen_subi_i32(dest, dest, 1);
474 }
475 
476 /* dest = T0 + T1. Compute C, N, V and Z flags */
477 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
478 {
479     TCGv_i32 tmp = tcg_temp_new_i32();
480     tcg_gen_movi_i32(tmp, 0);
481     tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
482     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
483     tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
484     tcg_gen_xor_i32(tmp, t0, t1);
485     tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
486     tcg_gen_mov_i32(dest, cpu_NF);
487 }
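/*
 * Added note on the V flag computed above: cpu_VF = (NF ^ t0) & ~(t0 ^ t1),
 * so bit 31 is set exactly when both operands have the same sign but the
 * result's sign differs, i.e. on signed overflow.
 */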
488 
489 /* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
490 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 {
492     TCGv_i32 tmp = tcg_temp_new_i32();
493     if (TCG_TARGET_HAS_add2_i32) {
494         tcg_gen_movi_i32(tmp, 0);
495         tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
496         tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
497     } else {
498         TCGv_i64 q0 = tcg_temp_new_i64();
499         TCGv_i64 q1 = tcg_temp_new_i64();
500         tcg_gen_extu_i32_i64(q0, t0);
501         tcg_gen_extu_i32_i64(q1, t1);
502         tcg_gen_add_i64(q0, q0, q1);
503         tcg_gen_extu_i32_i64(q1, cpu_CF);
504         tcg_gen_add_i64(q0, q0, q1);
505         tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
506     }
507     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
508     tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
509     tcg_gen_xor_i32(tmp, t0, t1);
510     tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
511     tcg_gen_mov_i32(dest, cpu_NF);
512 }
513 
514 /* dest = T0 - T1. Compute C, N, V and Z flags */
515 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
516 {
517     TCGv_i32 tmp;
518     tcg_gen_sub_i32(cpu_NF, t0, t1);
519     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
520     tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
521     tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
522     tmp = tcg_temp_new_i32();
523     tcg_gen_xor_i32(tmp, t0, t1);
524     tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
525     tcg_gen_mov_i32(dest, cpu_NF);
526 }
527 
528 /* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
529 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
530 {
531     TCGv_i32 tmp = tcg_temp_new_i32();
532     tcg_gen_not_i32(tmp, t1);
533     gen_adc_CC(dest, t0, tmp);
534 }
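/*
 * Added note: this reuses the ADC flag logic because, in two's complement,
 * T0 - T1 + CF - 1 == T0 + ~T1 + CF (since ~T1 == -T1 - 1).
 */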
535 
536 #define GEN_SHIFT(name)                                               \
537 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
538 {                                                                     \
539     TCGv_i32 tmpd = tcg_temp_new_i32();                               \
540     TCGv_i32 tmp1 = tcg_temp_new_i32();                               \
541     TCGv_i32 zero = tcg_constant_i32(0);                              \
542     tcg_gen_andi_i32(tmp1, t1, 0x1f);                                 \
543     tcg_gen_##name##_i32(tmpd, t0, tmp1);                             \
544     tcg_gen_andi_i32(tmp1, t1, 0xe0);                                 \
545     tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd);   \
546 }
547 GEN_SHIFT(shl)
548 GEN_SHIFT(shr)
549 #undef GEN_SHIFT
550 
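/*
 * Added note: for register-specified ASR only the bottom byte of the shift
 * register is used, and any count of 32..255 gives the same result as
 * ASR #31 (all sign bits), so gen_sar() below clamps the count to 31
 * before the TCG shift.
 */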
551 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
552 {
553     TCGv_i32 tmp1 = tcg_temp_new_i32();
554 
555     tcg_gen_andi_i32(tmp1, t1, 0xff);
556     tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31));
557     tcg_gen_sar_i32(dest, t0, tmp1);
558 }
559 
560 static void shifter_out_im(TCGv_i32 var, int shift)
561 {
562     tcg_gen_extract_i32(cpu_CF, var, shift, 1);
563 }
564 
565 /* Shift by immediate.  Includes special handling for shift == 0.  */
566 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
567                                     int shift, int flags)
568 {
569     switch (shiftop) {
570     case 0: /* LSL */
571         if (shift != 0) {
572             if (flags)
573                 shifter_out_im(var, 32 - shift);
574             tcg_gen_shli_i32(var, var, shift);
575         }
576         break;
577     case 1: /* LSR */
578         if (shift == 0) {
579             if (flags) {
580                 tcg_gen_shri_i32(cpu_CF, var, 31);
581             }
582             tcg_gen_movi_i32(var, 0);
583         } else {
584             if (flags)
585                 shifter_out_im(var, shift - 1);
586             tcg_gen_shri_i32(var, var, shift);
587         }
588         break;
589     case 2: /* ASR */
590         if (shift == 0)
591             shift = 32;
592         if (flags)
593             shifter_out_im(var, shift - 1);
594         if (shift == 32)
595           shift = 31;
596         tcg_gen_sari_i32(var, var, shift);
597         break;
598     case 3: /* ROR/RRX */
599         if (shift != 0) {
600             if (flags)
601                 shifter_out_im(var, shift - 1);
602             tcg_gen_rotri_i32(var, var, shift); break;
603         } else {
604             TCGv_i32 tmp = tcg_temp_new_i32();
605             tcg_gen_shli_i32(tmp, cpu_CF, 31);
606             if (flags)
607                 shifter_out_im(var, 0);
608             tcg_gen_shri_i32(var, var, 1);
609             tcg_gen_or_i32(var, var, tmp);
610         }
611     }
612 };
613 
614 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
615                                      TCGv_i32 shift, int flags)
616 {
617     if (flags) {
618         switch (shiftop) {
619         case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
620         case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
621         case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
622         case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
623         }
624     } else {
625         switch (shiftop) {
626         case 0:
627             gen_shl(var, var, shift);
628             break;
629         case 1:
630             gen_shr(var, var, shift);
631             break;
632         case 2:
633             gen_sar(var, var, shift);
634             break;
635         case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
636                 tcg_gen_rotr_i32(var, var, shift); break;
637         }
638     }
639 }
640 
641 /*
642  * Generate a conditional based on ARM condition code cc.
643  * This is common between ARM and Aarch64 targets.
644  */
645 void arm_test_cc(DisasCompare *cmp, int cc)
646 {
647     TCGv_i32 value;
648     TCGCond cond;
649 
650     switch (cc) {
651     case 0: /* eq: Z */
652     case 1: /* ne: !Z */
653         cond = TCG_COND_EQ;
654         value = cpu_ZF;
655         break;
656 
657     case 2: /* cs: C */
658     case 3: /* cc: !C */
659         cond = TCG_COND_NE;
660         value = cpu_CF;
661         break;
662 
663     case 4: /* mi: N */
664     case 5: /* pl: !N */
665         cond = TCG_COND_LT;
666         value = cpu_NF;
667         break;
668 
669     case 6: /* vs: V */
670     case 7: /* vc: !V */
671         cond = TCG_COND_LT;
672         value = cpu_VF;
673         break;
674 
675     case 8: /* hi: C && !Z */
676     case 9: /* ls: !C || Z -> !(C && !Z) */
677         cond = TCG_COND_NE;
678         value = tcg_temp_new_i32();
679         /* CF is 1 for C, so -CF is an all-bits-set mask for C;
680            ZF is non-zero for !Z; so AND the two subexpressions.  */
681         tcg_gen_neg_i32(value, cpu_CF);
682         tcg_gen_and_i32(value, value, cpu_ZF);
683         break;
684 
685     case 10: /* ge: N == V -> N ^ V == 0 */
686     case 11: /* lt: N != V -> N ^ V != 0 */
687         /* Since we're only interested in the sign bit, == 0 is >= 0.  */
688         cond = TCG_COND_GE;
689         value = tcg_temp_new_i32();
690         tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
691         break;
692 
693     case 12: /* gt: !Z && N == V */
694     case 13: /* le: Z || N != V */
695         cond = TCG_COND_NE;
696         value = tcg_temp_new_i32();
697         /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
698          * the sign bit then AND with ZF to yield the result.  */
699         tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
700         tcg_gen_sari_i32(value, value, 31);
701         tcg_gen_andc_i32(value, cpu_ZF, value);
702         break;
703 
704     case 14: /* always */
705     case 15: /* always */
706         /* Use the ALWAYS condition, which will fold early.
707          * It doesn't matter what we use for the value.  */
708         cond = TCG_COND_ALWAYS;
709         value = cpu_ZF;
710         goto no_invert;
711 
712     default:
713         fprintf(stderr, "Bad condition code 0x%x\n", cc);
714         abort();
715     }
716 
717     if (cc & 1) {
718         cond = tcg_invert_cond(cond);
719     }
720 
721  no_invert:
722     cmp->cond = cond;
723     cmp->value = value;
724 }
725 
726 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
727 {
728     tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
729 }
730 
731 void arm_gen_test_cc(int cc, TCGLabel *label)
732 {
733     DisasCompare cmp;
734     arm_test_cc(&cmp, cc);
735     arm_jump_cc(&cmp, label);
736 }
737 
738 void gen_set_condexec(DisasContext *s)
739 {
740     if (s->condexec_mask) {
741         uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
742 
743         store_cpu_field_constant(val, condexec_bits);
744     }
745 }
746 
747 void gen_update_pc(DisasContext *s, target_long diff)
748 {
749     gen_pc_plus_diff(s, cpu_R[15], diff);
750     s->pc_save = s->pc_curr + diff;
751 }
752 
753 /* Set PC and Thumb state from var.  var is marked as dead.  */
754 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
755 {
756     s->base.is_jmp = DISAS_JUMP;
757     tcg_gen_andi_i32(cpu_R[15], var, ~1);
758     tcg_gen_andi_i32(var, var, 1);
759     store_cpu_field(var, thumb);
760     s->pc_save = -1;
761 }
762 
763 /*
764  * Set PC and Thumb state from var. var is marked as dead.
765  * For M-profile CPUs, include logic to detect exception-return
766  * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
767  * and BX reg, and no others, and happens only for code in Handler mode.
768  * The Security Extension also requires us to check for the FNC_RETURN
769  * which signals a function return from non-secure state; this can happen
770  * in both Handler and Thread mode.
771  * To avoid having to do multiple comparisons in inline generated code,
772  * we make the check we do here loose, so it will match for EXC_RETURN
773  * in Thread mode. For system emulation do_v7m_exception_exit() checks
774  * for these spurious cases and returns without doing anything (giving
775  * the same behaviour as for a branch to a non-magic address).
776  *
777  * In linux-user mode it is unclear what the right behaviour for an
778  * attempted FNC_RETURN should be, because in real hardware this will go
779  * directly to Secure code (ie not the Linux kernel) which will then treat
780  * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
781  * attempt behave the way it would on a CPU without the security extension,
782  * which is to say "like a normal branch". That means we can simply treat
783  * all branches as normal with no magic address behaviour.
784  */
785 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
786 {
787     /* Generate the same code here as for a simple bx, but flag via
788      * s->base.is_jmp that we need to do the rest of the work later.
789      */
790     gen_bx(s, var);
791 #ifndef CONFIG_USER_ONLY
792     if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
793         (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
794         s->base.is_jmp = DISAS_BX_EXCRET;
795     }
796 #endif
797 }
798 
799 static inline void gen_bx_excret_final_code(DisasContext *s)
800 {
801     /* Generate the code to finish possible exception return and end the TB */
802     DisasLabel excret_label = gen_disas_label(s);
803     uint32_t min_magic;
804 
805     if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
806         /* Covers FNC_RETURN and EXC_RETURN magic */
807         min_magic = FNC_RETURN_MIN_MAGIC;
808     } else {
809         /* EXC_RETURN magic only */
810         min_magic = EXC_RETURN_MIN_MAGIC;
811     }
812 
813     /* Is the new PC value in the magic range indicating exception return? */
814     tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
815     /* No: end the TB as we would for a DISAS_JMP */
816     if (s->ss_active) {
817         gen_singlestep_exception(s);
818     } else {
819         tcg_gen_exit_tb(NULL, 0);
820     }
821     set_disas_label(s, excret_label);
822     /* Yes: this is an exception return.
823      * At this point in runtime env->regs[15] and env->thumb will hold
824      * the exception-return magic number, which do_v7m_exception_exit()
825      * will read. Nothing else will be able to see those values because
826      * the cpu-exec main loop guarantees that we will always go straight
827      * from raising the exception to the exception-handling code.
828      *
829      * gen_ss_advance(s) does nothing on M profile currently but
830      * calling it is conceptually the right thing as we have executed
831      * this instruction (compare SWI, HVC, SMC handling).
832      */
833     gen_ss_advance(s);
834     gen_exception_internal(EXCP_EXCEPTION_EXIT);
835 }
836 
837 static inline void gen_bxns(DisasContext *s, int rm)
838 {
839     TCGv_i32 var = load_reg(s, rm);
840 
841     /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
842      * we need to sync state before calling it, but:
843      *  - we don't need to do gen_update_pc() because the bxns helper will
844      *    always set the PC itself
845      *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
846      *    unless it's outside an IT block or the last insn in an IT block,
847      *    so we know that condexec == 0 (already set at the top of the TB)
848      *    is correct in the non-UNPREDICTABLE cases, and we can choose
849      *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
850      */
851     gen_helper_v7m_bxns(tcg_env, var);
852     s->base.is_jmp = DISAS_EXIT;
853 }
854 
855 static inline void gen_blxns(DisasContext *s, int rm)
856 {
857     TCGv_i32 var = load_reg(s, rm);
858 
859     /* We don't need to sync condexec state, for the same reason as bxns.
860      * We do however need to set the PC, because the blxns helper reads it.
861      * The blxns helper may throw an exception.
862      */
863     gen_update_pc(s, curr_insn_len(s));
864     gen_helper_v7m_blxns(tcg_env, var);
865     s->base.is_jmp = DISAS_EXIT;
866 }
867 
868 /* Variant of store_reg which uses branch&exchange logic when storing
869    to r15 in ARM architecture v7 and above. The source must be a temporary
870    and will be marked as dead. */
871 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
872 {
873     if (reg == 15 && ENABLE_ARCH_7) {
874         gen_bx(s, var);
875     } else {
876         store_reg(s, reg, var);
877     }
878 }
879 
880 /* Variant of store_reg which uses branch&exchange logic when storing
881  * to r15 in ARM architecture v5T and above. This is used for storing
882  * the results of a LDR/LDM/POP into r15, and corresponds to the cases
883  * in the ARM ARM which use the LoadWritePC() pseudocode function. */
884 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
885 {
886     if (reg == 15 && ENABLE_ARCH_5) {
887         gen_bx_excret(s, var);
888     } else {
889         store_reg(s, reg, var);
890     }
891 }
892 
893 #ifdef CONFIG_USER_ONLY
894 #define IS_USER_ONLY 1
895 #else
896 #define IS_USER_ONLY 0
897 #endif
898 
899 MemOp pow2_align(unsigned i)
900 {
901     static const MemOp mop_align[] = {
902         0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32
903     };
904     g_assert(i < ARRAY_SIZE(mop_align));
905     return mop_align[i];
906 }
907 
908 /*
909  * Abstractions of "generate code to do a guest load/store for
910  * AArch32", where a vaddr is always 32 bits (and is zero
911  * extended if we're a 64 bit core) and  data is also
912  * 32 bits unless specifically doing a 64 bit access.
913  * These functions work like tcg_gen_qemu_{ld,st}* except
914  * that the address argument is TCGv_i32 rather than TCGv.
915  */
916 
917 static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
918 {
919     TCGv addr = tcg_temp_new();
920     tcg_gen_extu_i32_tl(addr, a32);
921 
922     /* Not needed for user-mode BE32, where we use MO_BE instead.  */
923     if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
924         tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
925     }
926     return addr;
927 }
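/*
 * Added note: with SCTLR.B set (legacy BE32 layout), sub-word accesses are
 * handled above by XORing the address (e.g. a byte access XORs with 3) so
 * that the correct byte lane within the aligned word is touched; 64-bit
 * accesses are instead handled in the _i64 helpers below by rotating the
 * data by 32 bits to swap the two words.
 */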
928 
929 /*
930  * Internal routines are used for NEON cases where the endianness
931  * and/or alignment has already been taken into account and manipulated.
932  */
933 void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
934                               TCGv_i32 a32, int index, MemOp opc)
935 {
936     TCGv addr = gen_aa32_addr(s, a32, opc);
937     tcg_gen_qemu_ld_i32(val, addr, index, opc);
938 }
939 
940 void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
941                               TCGv_i32 a32, int index, MemOp opc)
942 {
943     TCGv addr = gen_aa32_addr(s, a32, opc);
944     tcg_gen_qemu_st_i32(val, addr, index, opc);
945 }
946 
947 void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
948                               TCGv_i32 a32, int index, MemOp opc)
949 {
950     TCGv addr = gen_aa32_addr(s, a32, opc);
951 
952     tcg_gen_qemu_ld_i64(val, addr, index, opc);
953 
954     /* Not needed for user-mode BE32, where we use MO_BE instead.  */
955     if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
956         tcg_gen_rotri_i64(val, val, 32);
957     }
958 }
959 
960 void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
961                               TCGv_i32 a32, int index, MemOp opc)
962 {
963     TCGv addr = gen_aa32_addr(s, a32, opc);
964 
965     /* Not needed for user-mode BE32, where we use MO_BE instead.  */
966     if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
967         TCGv_i64 tmp = tcg_temp_new_i64();
968         tcg_gen_rotri_i64(tmp, val, 32);
969         tcg_gen_qemu_st_i64(tmp, addr, index, opc);
970     } else {
971         tcg_gen_qemu_st_i64(val, addr, index, opc);
972     }
973 }
974 
975 void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
976                      int index, MemOp opc)
977 {
978     gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
979 }
980 
981 void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
982                      int index, MemOp opc)
983 {
984     gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
985 }
986 
987 void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
988                      int index, MemOp opc)
989 {
990     gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
991 }
992 
993 void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
994                      int index, MemOp opc)
995 {
996     gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
997 }
998 
999 #define DO_GEN_LD(SUFF, OPC)                                            \
1000     static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1001                                          TCGv_i32 a32, int index)       \
1002     {                                                                   \
1003         gen_aa32_ld_i32(s, val, a32, index, OPC);                       \
1004     }
1005 
1006 #define DO_GEN_ST(SUFF, OPC)                                            \
1007     static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1008                                          TCGv_i32 a32, int index)       \
1009     {                                                                   \
1010         gen_aa32_st_i32(s, val, a32, index, OPC);                       \
1011     }
1012 
1013 static inline void gen_hvc(DisasContext *s, int imm16)
1014 {
1015     /* The pre HVC helper handles cases when HVC gets trapped
1016      * as an undefined insn by runtime configuration (ie before
1017      * the insn really executes).
1018      */
1019     gen_update_pc(s, 0);
1020     gen_helper_pre_hvc(tcg_env);
1021     /* Otherwise we will treat this as a real exception which
1022      * happens after execution of the insn. (The distinction matters
1023      * for the PC value reported to the exception handler and also
1024      * for single stepping.)
1025      */
1026     s->svc_imm = imm16;
1027     gen_update_pc(s, curr_insn_len(s));
1028     s->base.is_jmp = DISAS_HVC;
1029 }
1030 
1031 static inline void gen_smc(DisasContext *s)
1032 {
1033     /* As with HVC, we may take an exception either before or after
1034      * the insn executes.
1035      */
1036     gen_update_pc(s, 0);
1037     gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
1038     gen_update_pc(s, curr_insn_len(s));
1039     s->base.is_jmp = DISAS_SMC;
1040 }
1041 
1042 static void gen_exception_internal_insn(DisasContext *s, int excp)
1043 {
1044     gen_set_condexec(s);
1045     gen_update_pc(s, 0);
1046     gen_exception_internal(excp);
1047     s->base.is_jmp = DISAS_NORETURN;
1048 }
1049 
1050 static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
1051 {
1052     gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
1053                                           tcg_constant_i32(syndrome), tcg_el);
1054 }
1055 
1056 static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el)
1057 {
1058     gen_exception_el_v(excp, syndrome, tcg_constant_i32(target_el));
1059 }
1060 
1061 static void gen_exception(int excp, uint32_t syndrome)
1062 {
1063     gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
1064                                        tcg_constant_i32(syndrome));
1065 }
1066 
1067 static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
1068                                     int excp, uint32_t syn, TCGv_i32 tcg_el)
1069 {
1070     if (s->aarch64) {
1071         gen_a64_update_pc(s, pc_diff);
1072     } else {
1073         gen_set_condexec(s);
1074         gen_update_pc(s, pc_diff);
1075     }
1076     gen_exception_el_v(excp, syn, tcg_el);
1077     s->base.is_jmp = DISAS_NORETURN;
1078 }
1079 
1080 void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
1081                            uint32_t syn, uint32_t target_el)
1082 {
1083     gen_exception_insn_el_v(s, pc_diff, excp, syn,
1084                             tcg_constant_i32(target_el));
1085 }
1086 
1087 void gen_exception_insn(DisasContext *s, target_long pc_diff,
1088                         int excp, uint32_t syn)
1089 {
1090     if (s->aarch64) {
1091         gen_a64_update_pc(s, pc_diff);
1092     } else {
1093         gen_set_condexec(s);
1094         gen_update_pc(s, pc_diff);
1095     }
1096     gen_exception(excp, syn);
1097     s->base.is_jmp = DISAS_NORETURN;
1098 }
1099 
1100 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1101 {
1102     gen_set_condexec(s);
1103     gen_update_pc(s, 0);
1104     gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
1105     s->base.is_jmp = DISAS_NORETURN;
1106 }
1107 
1108 void unallocated_encoding(DisasContext *s)
1109 {
1110     /* Unallocated and reserved encodings are uncategorized */
1111     gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
1112 }
1113 
1114 /* Force a TB lookup after an instruction that changes the CPU state.  */
1115 void gen_lookup_tb(DisasContext *s)
1116 {
1117     gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
1118     s->base.is_jmp = DISAS_EXIT;
1119 }
1120 
1121 static inline void gen_hlt(DisasContext *s, int imm)
1122 {
1123     /* HLT. This has two purposes.
1124      * Architecturally, it is an external halting debug instruction.
1125      * Since QEMU doesn't implement external debug, we treat this as
1126      * it is required for halting debug disabled: it will UNDEF.
1127      * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1128      * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1129      * must trigger semihosting even for ARMv7 and earlier, where
1130      * HLT was an undefined encoding.
1131      * In system mode, we don't allow userspace access to
1132      * semihosting, to provide some semblance of security
1133      * (and for consistency with our 32-bit semihosting).
1134      */
1135     if (semihosting_enabled(s->current_el == 0) &&
1136         (imm == (s->thumb ? 0x3c : 0xf000))) {
1137         gen_exception_internal_insn(s, EXCP_SEMIHOST);
1138         return;
1139     }
1140 
1141     unallocated_encoding(s);
1142 }
1143 
1144 /*
1145  * Return the offset of a "full" NEON Dreg.
1146  */
1147 long neon_full_reg_offset(unsigned reg)
1148 {
1149     return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1150 }
1151 
1152 /*
1153  * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1154  * where 0 is the least significant end of the register.
1155  */
1156 long neon_element_offset(int reg, int element, MemOp memop)
1157 {
1158     int element_size = 1 << (memop & MO_SIZE);
1159     int ofs = element * element_size;
1160 #if HOST_BIG_ENDIAN
1161     /*
1162      * Calculate the offset assuming fully little-endian,
1163      * then XOR to account for the order of the 8-byte units.
1164      */
1165     if (element_size < 8) {
1166         ofs ^= 8 - element_size;
1167     }
1168 #endif
1169     return neon_full_reg_offset(reg) + ofs;
1170 }
1171 
1172 /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
1173 long vfp_reg_offset(bool dp, unsigned reg)
1174 {
1175     if (dp) {
1176         return neon_element_offset(reg, 0, MO_64);
1177     } else {
1178         return neon_element_offset(reg >> 1, reg & 1, MO_32);
1179     }
1180 }
1181 
1182 void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
1183 {
1184     long off = neon_element_offset(reg, ele, memop);
1185 
1186     switch (memop) {
1187     case MO_SB:
1188         tcg_gen_ld8s_i32(dest, tcg_env, off);
1189         break;
1190     case MO_UB:
1191         tcg_gen_ld8u_i32(dest, tcg_env, off);
1192         break;
1193     case MO_SW:
1194         tcg_gen_ld16s_i32(dest, tcg_env, off);
1195         break;
1196     case MO_UW:
1197         tcg_gen_ld16u_i32(dest, tcg_env, off);
1198         break;
1199     case MO_UL:
1200     case MO_SL:
1201         tcg_gen_ld_i32(dest, tcg_env, off);
1202         break;
1203     default:
1204         g_assert_not_reached();
1205     }
1206 }
1207 
1208 void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
1209 {
1210     long off = neon_element_offset(reg, ele, memop);
1211 
1212     switch (memop) {
1213     case MO_SL:
1214         tcg_gen_ld32s_i64(dest, tcg_env, off);
1215         break;
1216     case MO_UL:
1217         tcg_gen_ld32u_i64(dest, tcg_env, off);
1218         break;
1219     case MO_UQ:
1220         tcg_gen_ld_i64(dest, tcg_env, off);
1221         break;
1222     default:
1223         g_assert_not_reached();
1224     }
1225 }
1226 
1227 void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
1228 {
1229     long off = neon_element_offset(reg, ele, memop);
1230 
1231     switch (memop) {
1232     case MO_8:
1233         tcg_gen_st8_i32(src, tcg_env, off);
1234         break;
1235     case MO_16:
1236         tcg_gen_st16_i32(src, tcg_env, off);
1237         break;
1238     case MO_32:
1239         tcg_gen_st_i32(src, tcg_env, off);
1240         break;
1241     default:
1242         g_assert_not_reached();
1243     }
1244 }
1245 
1246 void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
1247 {
1248     long off = neon_element_offset(reg, ele, memop);
1249 
1250     switch (memop) {
1251     case MO_32:
1252         tcg_gen_st32_i64(src, tcg_env, off);
1253         break;
1254     case MO_64:
1255         tcg_gen_st_i64(src, tcg_env, off);
1256         break;
1257     default:
1258         g_assert_not_reached();
1259     }
1260 }
1261 
1262 #define ARM_CP_RW_BIT   (1 << 20)
1263 
1264 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1265 {
1266     tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1267 }
1268 
1269 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1270 {
1271     tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1272 }
1273 
1274 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1275 {
1276     TCGv_i32 var = tcg_temp_new_i32();
1277     tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1278     return var;
1279 }
1280 
1281 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1282 {
1283     tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1284 }
1285 
1286 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1287 {
1288     iwmmxt_store_reg(cpu_M0, rn);
1289 }
1290 
1291 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1292 {
1293     iwmmxt_load_reg(cpu_M0, rn);
1294 }
1295 
1296 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1297 {
1298     iwmmxt_load_reg(cpu_V1, rn);
1299     tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1300 }
1301 
1302 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1303 {
1304     iwmmxt_load_reg(cpu_V1, rn);
1305     tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1306 }
1307 
1308 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1309 {
1310     iwmmxt_load_reg(cpu_V1, rn);
1311     tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1312 }
1313 
1314 #define IWMMXT_OP(name) \
1315 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1316 { \
1317     iwmmxt_load_reg(cpu_V1, rn); \
1318     gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1319 }
1320 
1321 #define IWMMXT_OP_ENV(name) \
1322 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1323 { \
1324     iwmmxt_load_reg(cpu_V1, rn); \
1325     gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
1326 }
1327 
1328 #define IWMMXT_OP_ENV_SIZE(name) \
1329 IWMMXT_OP_ENV(name##b) \
1330 IWMMXT_OP_ENV(name##w) \
1331 IWMMXT_OP_ENV(name##l)
1332 
1333 #define IWMMXT_OP_ENV1(name) \
1334 static inline void gen_op_iwmmxt_##name##_M0(void) \
1335 { \
1336     gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
1337 }
1338 
1339 IWMMXT_OP(maddsq)
1340 IWMMXT_OP(madduq)
1341 IWMMXT_OP(sadb)
1342 IWMMXT_OP(sadw)
1343 IWMMXT_OP(mulslw)
1344 IWMMXT_OP(mulshw)
1345 IWMMXT_OP(mululw)
1346 IWMMXT_OP(muluhw)
1347 IWMMXT_OP(macsw)
1348 IWMMXT_OP(macuw)
1349 
1350 IWMMXT_OP_ENV_SIZE(unpackl)
1351 IWMMXT_OP_ENV_SIZE(unpackh)
1352 
1353 IWMMXT_OP_ENV1(unpacklub)
1354 IWMMXT_OP_ENV1(unpackluw)
1355 IWMMXT_OP_ENV1(unpacklul)
1356 IWMMXT_OP_ENV1(unpackhub)
1357 IWMMXT_OP_ENV1(unpackhuw)
1358 IWMMXT_OP_ENV1(unpackhul)
1359 IWMMXT_OP_ENV1(unpacklsb)
1360 IWMMXT_OP_ENV1(unpacklsw)
1361 IWMMXT_OP_ENV1(unpacklsl)
1362 IWMMXT_OP_ENV1(unpackhsb)
1363 IWMMXT_OP_ENV1(unpackhsw)
1364 IWMMXT_OP_ENV1(unpackhsl)
1365 
1366 IWMMXT_OP_ENV_SIZE(cmpeq)
1367 IWMMXT_OP_ENV_SIZE(cmpgtu)
1368 IWMMXT_OP_ENV_SIZE(cmpgts)
1369 
1370 IWMMXT_OP_ENV_SIZE(mins)
1371 IWMMXT_OP_ENV_SIZE(minu)
1372 IWMMXT_OP_ENV_SIZE(maxs)
1373 IWMMXT_OP_ENV_SIZE(maxu)
1374 
1375 IWMMXT_OP_ENV_SIZE(subn)
1376 IWMMXT_OP_ENV_SIZE(addn)
1377 IWMMXT_OP_ENV_SIZE(subu)
1378 IWMMXT_OP_ENV_SIZE(addu)
1379 IWMMXT_OP_ENV_SIZE(subs)
1380 IWMMXT_OP_ENV_SIZE(adds)
1381 
1382 IWMMXT_OP_ENV(avgb0)
1383 IWMMXT_OP_ENV(avgb1)
1384 IWMMXT_OP_ENV(avgw0)
1385 IWMMXT_OP_ENV(avgw1)
1386 
1387 IWMMXT_OP_ENV(packuw)
1388 IWMMXT_OP_ENV(packul)
1389 IWMMXT_OP_ENV(packuq)
1390 IWMMXT_OP_ENV(packsw)
1391 IWMMXT_OP_ENV(packsl)
1392 IWMMXT_OP_ENV(packsq)
1393 
1394 static void gen_op_iwmmxt_set_mup(void)
1395 {
1396     TCGv_i32 tmp;
1397     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1398     tcg_gen_ori_i32(tmp, tmp, 2);
1399     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1400 }
1401 
1402 static void gen_op_iwmmxt_set_cup(void)
1403 {
1404     TCGv_i32 tmp;
1405     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1406     tcg_gen_ori_i32(tmp, tmp, 1);
1407     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1408 }
1409 
1410 static void gen_op_iwmmxt_setpsr_nz(void)
1411 {
1412     TCGv_i32 tmp = tcg_temp_new_i32();
1413     gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1414     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1415 }
1416 
1417 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1418 {
1419     iwmmxt_load_reg(cpu_V1, rn);
1420     tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1421     tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1422 }
1423 
1424 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1425                                      TCGv_i32 dest)
1426 {
1427     int rd;
1428     uint32_t offset;
1429     TCGv_i32 tmp;
1430 
1431     rd = (insn >> 16) & 0xf;
1432     tmp = load_reg(s, rd);
1433 
1434     offset = (insn & 0xff) << ((insn >> 7) & 2);
1435     if (insn & (1 << 24)) {
1436         /* Pre indexed */
1437         if (insn & (1 << 23))
1438             tcg_gen_addi_i32(tmp, tmp, offset);
1439         else
1440             tcg_gen_addi_i32(tmp, tmp, -offset);
1441         tcg_gen_mov_i32(dest, tmp);
1442         if (insn & (1 << 21)) {
1443             store_reg(s, rd, tmp);
1444         }
1445     } else if (insn & (1 << 21)) {
1446         /* Post indexed */
1447         tcg_gen_mov_i32(dest, tmp);
1448         if (insn & (1 << 23))
1449             tcg_gen_addi_i32(tmp, tmp, offset);
1450         else
1451             tcg_gen_addi_i32(tmp, tmp, -offset);
1452         store_reg(s, rd, tmp);
1453     } else if (!(insn & (1 << 23)))
1454         return 1;
1455     return 0;
1456 }
1457 
1458 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1459 {
1460     int rd = (insn >> 0) & 0xf;
1461     TCGv_i32 tmp;
1462 
1463     if (insn & (1 << 8)) {
1464         if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1465             return 1;
1466         } else {
1467             tmp = iwmmxt_load_creg(rd);
1468         }
1469     } else {
1470         tmp = tcg_temp_new_i32();
1471         iwmmxt_load_reg(cpu_V0, rd);
1472         tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1473     }
1474     tcg_gen_andi_i32(tmp, tmp, mask);
1475     tcg_gen_mov_i32(dest, tmp);
1476     return 0;
1477 }
1478 
1479 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1480    (ie. an undefined instruction).  */
1481 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1482 {
1483     int rd, wrd;
1484     int rdhi, rdlo, rd0, rd1, i;
1485     TCGv_i32 addr;
1486     TCGv_i32 tmp, tmp2, tmp3;
1487 
1488     if ((insn & 0x0e000e00) == 0x0c000000) {
1489         if ((insn & 0x0fe00ff0) == 0x0c400000) {
1490             wrd = insn & 0xf;
1491             rdlo = (insn >> 12) & 0xf;
1492             rdhi = (insn >> 16) & 0xf;
1493             if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
1494                 iwmmxt_load_reg(cpu_V0, wrd);
1495                 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1496                 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1497             } else {                                    /* TMCRR */
1498                 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1499                 iwmmxt_store_reg(cpu_V0, wrd);
1500                 gen_op_iwmmxt_set_mup();
1501             }
1502             return 0;
1503         }
1504 
1505         wrd = (insn >> 12) & 0xf;
1506         addr = tcg_temp_new_i32();
1507         if (gen_iwmmxt_address(s, insn, addr)) {
1508             return 1;
1509         }
1510         if (insn & ARM_CP_RW_BIT) {
1511             if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
1512                 tmp = tcg_temp_new_i32();
1513                 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1514                 iwmmxt_store_creg(wrd, tmp);
1515             } else {
1516                 i = 1;
1517                 if (insn & (1 << 8)) {
1518                     if (insn & (1 << 22)) {             /* WLDRD */
1519                         gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1520                         i = 0;
1521                     } else {                            /* WLDRW wRd */
1522                         tmp = tcg_temp_new_i32();
1523                         gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1524                     }
1525                 } else {
1526                     tmp = tcg_temp_new_i32();
1527                     if (insn & (1 << 22)) {             /* WLDRH */
1528                         gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1529                     } else {                            /* WLDRB */
1530                         gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1531                     }
1532                 }
1533                 if (i) {
1534                     tcg_gen_extu_i32_i64(cpu_M0, tmp);
1535                 }
1536                 gen_op_iwmmxt_movq_wRn_M0(wrd);
1537             }
1538         } else {
1539             if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
1540                 tmp = iwmmxt_load_creg(wrd);
1541                 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1542             } else {
1543                 gen_op_iwmmxt_movq_M0_wRn(wrd);
1544                 tmp = tcg_temp_new_i32();
1545                 if (insn & (1 << 8)) {
1546                     if (insn & (1 << 22)) {             /* WSTRD */
1547                         gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1548                     } else {                            /* WSTRW wRd */
1549                         tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1550                         gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1551                     }
1552                 } else {
1553                     if (insn & (1 << 22)) {             /* WSTRH */
1554                         tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1555                         gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1556                     } else {                            /* WSTRB */
1557                         tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1558                         gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1559                     }
1560                 }
1561             }
1562         }
1563         return 0;
1564     }
1565 
1566     if ((insn & 0x0f000000) != 0x0e000000)
1567         return 1;
1568 
1569     switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1570     case 0x000:                                                 /* WOR */
1571         wrd = (insn >> 12) & 0xf;
1572         rd0 = (insn >> 0) & 0xf;
1573         rd1 = (insn >> 16) & 0xf;
1574         gen_op_iwmmxt_movq_M0_wRn(rd0);
1575         gen_op_iwmmxt_orq_M0_wRn(rd1);
1576         gen_op_iwmmxt_setpsr_nz();
1577         gen_op_iwmmxt_movq_wRn_M0(wrd);
1578         gen_op_iwmmxt_set_mup();
1579         gen_op_iwmmxt_set_cup();
1580         break;
1581     case 0x011:                                                 /* TMCR */
1582         if (insn & 0xf)
1583             return 1;
1584         rd = (insn >> 12) & 0xf;
1585         wrd = (insn >> 16) & 0xf;
1586         switch (wrd) {
1587         case ARM_IWMMXT_wCID:
1588         case ARM_IWMMXT_wCASF:
1589             break;
1590         case ARM_IWMMXT_wCon:
1591             gen_op_iwmmxt_set_cup();
1592             /* Fall through.  */
1593         case ARM_IWMMXT_wCSSF:
1594             tmp = iwmmxt_load_creg(wrd);
1595             tmp2 = load_reg(s, rd);
1596             tcg_gen_andc_i32(tmp, tmp, tmp2);
1597             iwmmxt_store_creg(wrd, tmp);
1598             break;
1599         case ARM_IWMMXT_wCGR0:
1600         case ARM_IWMMXT_wCGR1:
1601         case ARM_IWMMXT_wCGR2:
1602         case ARM_IWMMXT_wCGR3:
1603             gen_op_iwmmxt_set_cup();
1604             tmp = load_reg(s, rd);
1605             iwmmxt_store_creg(wrd, tmp);
1606             break;
1607         default:
1608             return 1;
1609         }
1610         break;
1611     case 0x100:                                                 /* WXOR */
1612         wrd = (insn >> 12) & 0xf;
1613         rd0 = (insn >> 0) & 0xf;
1614         rd1 = (insn >> 16) & 0xf;
1615         gen_op_iwmmxt_movq_M0_wRn(rd0);
1616         gen_op_iwmmxt_xorq_M0_wRn(rd1);
1617         gen_op_iwmmxt_setpsr_nz();
1618         gen_op_iwmmxt_movq_wRn_M0(wrd);
1619         gen_op_iwmmxt_set_mup();
1620         gen_op_iwmmxt_set_cup();
1621         break;
1622     case 0x111:                                                 /* TMRC */
1623         if (insn & 0xf)
1624             return 1;
1625         rd = (insn >> 12) & 0xf;
1626         wrd = (insn >> 16) & 0xf;
1627         tmp = iwmmxt_load_creg(wrd);
1628         store_reg(s, rd, tmp);
1629         break;
1630     case 0x300:                                                 /* WANDN */
1631         wrd = (insn >> 12) & 0xf;
1632         rd0 = (insn >> 0) & 0xf;
1633         rd1 = (insn >> 16) & 0xf;
1634         gen_op_iwmmxt_movq_M0_wRn(rd0);
1635         tcg_gen_neg_i64(cpu_M0, cpu_M0);
1636         gen_op_iwmmxt_andq_M0_wRn(rd1);
1637         gen_op_iwmmxt_setpsr_nz();
1638         gen_op_iwmmxt_movq_wRn_M0(wrd);
1639         gen_op_iwmmxt_set_mup();
1640         gen_op_iwmmxt_set_cup();
1641         break;
1642     case 0x200:                                                 /* WAND */
1643         wrd = (insn >> 12) & 0xf;
1644         rd0 = (insn >> 0) & 0xf;
1645         rd1 = (insn >> 16) & 0xf;
1646         gen_op_iwmmxt_movq_M0_wRn(rd0);
1647         gen_op_iwmmxt_andq_M0_wRn(rd1);
1648         gen_op_iwmmxt_setpsr_nz();
1649         gen_op_iwmmxt_movq_wRn_M0(wrd);
1650         gen_op_iwmmxt_set_mup();
1651         gen_op_iwmmxt_set_cup();
1652         break;
1653     case 0x810: case 0xa10:                             /* WMADD */
1654         wrd = (insn >> 12) & 0xf;
1655         rd0 = (insn >> 0) & 0xf;
1656         rd1 = (insn >> 16) & 0xf;
1657         gen_op_iwmmxt_movq_M0_wRn(rd0);
1658         if (insn & (1 << 21))
1659             gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1660         else
1661             gen_op_iwmmxt_madduq_M0_wRn(rd1);
1662         gen_op_iwmmxt_movq_wRn_M0(wrd);
1663         gen_op_iwmmxt_set_mup();
1664         break;
1665     case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
1666         wrd = (insn >> 12) & 0xf;
1667         rd0 = (insn >> 16) & 0xf;
1668         rd1 = (insn >> 0) & 0xf;
1669         gen_op_iwmmxt_movq_M0_wRn(rd0);
1670         switch ((insn >> 22) & 3) {
1671         case 0:
1672             gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1673             break;
1674         case 1:
1675             gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1676             break;
1677         case 2:
1678             gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1679             break;
1680         case 3:
1681             return 1;
1682         }
1683         gen_op_iwmmxt_movq_wRn_M0(wrd);
1684         gen_op_iwmmxt_set_mup();
1685         gen_op_iwmmxt_set_cup();
1686         break;
1687     case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
1688         wrd = (insn >> 12) & 0xf;
1689         rd0 = (insn >> 16) & 0xf;
1690         rd1 = (insn >> 0) & 0xf;
1691         gen_op_iwmmxt_movq_M0_wRn(rd0);
1692         switch ((insn >> 22) & 3) {
1693         case 0:
1694             gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1695             break;
1696         case 1:
1697             gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1698             break;
1699         case 2:
1700             gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1701             break;
1702         case 3:
1703             return 1;
1704         }
1705         gen_op_iwmmxt_movq_wRn_M0(wrd);
1706         gen_op_iwmmxt_set_mup();
1707         gen_op_iwmmxt_set_cup();
1708         break;
1709     case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
1710         wrd = (insn >> 12) & 0xf;
1711         rd0 = (insn >> 16) & 0xf;
1712         rd1 = (insn >> 0) & 0xf;
1713         gen_op_iwmmxt_movq_M0_wRn(rd0);
1714         if (insn & (1 << 22))
1715             gen_op_iwmmxt_sadw_M0_wRn(rd1);
1716         else
1717             gen_op_iwmmxt_sadb_M0_wRn(rd1);
1718         if (!(insn & (1 << 20)))
1719             gen_op_iwmmxt_addl_M0_wRn(wrd);
1720         gen_op_iwmmxt_movq_wRn_M0(wrd);
1721         gen_op_iwmmxt_set_mup();
1722         break;
1723     case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
1724         wrd = (insn >> 12) & 0xf;
1725         rd0 = (insn >> 16) & 0xf;
1726         rd1 = (insn >> 0) & 0xf;
1727         gen_op_iwmmxt_movq_M0_wRn(rd0);
1728         if (insn & (1 << 21)) {
1729             if (insn & (1 << 20))
1730                 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1731             else
1732                 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1733         } else {
1734             if (insn & (1 << 20))
1735                 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1736             else
1737                 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1738         }
1739         gen_op_iwmmxt_movq_wRn_M0(wrd);
1740         gen_op_iwmmxt_set_mup();
1741         break;
1742     case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
1743         wrd = (insn >> 12) & 0xf;
1744         rd0 = (insn >> 16) & 0xf;
1745         rd1 = (insn >> 0) & 0xf;
1746         gen_op_iwmmxt_movq_M0_wRn(rd0);
1747         if (insn & (1 << 21))
1748             gen_op_iwmmxt_macsw_M0_wRn(rd1);
1749         else
1750             gen_op_iwmmxt_macuw_M0_wRn(rd1);
1751         if (!(insn & (1 << 20))) {
1752             iwmmxt_load_reg(cpu_V1, wrd);
1753             tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1754         }
1755         gen_op_iwmmxt_movq_wRn_M0(wrd);
1756         gen_op_iwmmxt_set_mup();
1757         break;
1758     case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
1759         wrd = (insn >> 12) & 0xf;
1760         rd0 = (insn >> 16) & 0xf;
1761         rd1 = (insn >> 0) & 0xf;
1762         gen_op_iwmmxt_movq_M0_wRn(rd0);
1763         switch ((insn >> 22) & 3) {
1764         case 0:
1765             gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1766             break;
1767         case 1:
1768             gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1769             break;
1770         case 2:
1771             gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1772             break;
1773         case 3:
1774             return 1;
1775         }
1776         gen_op_iwmmxt_movq_wRn_M0(wrd);
1777         gen_op_iwmmxt_set_mup();
1778         gen_op_iwmmxt_set_cup();
1779         break;
1780     case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
1781         wrd = (insn >> 12) & 0xf;
1782         rd0 = (insn >> 16) & 0xf;
1783         rd1 = (insn >> 0) & 0xf;
1784         gen_op_iwmmxt_movq_M0_wRn(rd0);
1785         if (insn & (1 << 22)) {
1786             if (insn & (1 << 20))
1787                 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1788             else
1789                 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1790         } else {
1791             if (insn & (1 << 20))
1792                 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1793             else
1794                 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1795         }
1796         gen_op_iwmmxt_movq_wRn_M0(wrd);
1797         gen_op_iwmmxt_set_mup();
1798         gen_op_iwmmxt_set_cup();
1799         break;
1800     case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
1801         wrd = (insn >> 12) & 0xf;
1802         rd0 = (insn >> 16) & 0xf;
1803         rd1 = (insn >> 0) & 0xf;
1804         gen_op_iwmmxt_movq_M0_wRn(rd0);
1805         tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1806         tcg_gen_andi_i32(tmp, tmp, 7);
1807         iwmmxt_load_reg(cpu_V1, rd1);
1808         gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1809         gen_op_iwmmxt_movq_wRn_M0(wrd);
1810         gen_op_iwmmxt_set_mup();
1811         break;
1812     case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
1813         if (((insn >> 6) & 3) == 3)
1814             return 1;
1815         rd = (insn >> 12) & 0xf;
1816         wrd = (insn >> 16) & 0xf;
1817         tmp = load_reg(s, rd);
1818         gen_op_iwmmxt_movq_M0_wRn(wrd);
1819         switch ((insn >> 6) & 3) {
1820         case 0:
1821             tmp2 = tcg_constant_i32(0xff);
1822             tmp3 = tcg_constant_i32((insn & 7) << 3);
1823             break;
1824         case 1:
1825             tmp2 = tcg_constant_i32(0xffff);
1826             tmp3 = tcg_constant_i32((insn & 3) << 4);
1827             break;
1828         case 2:
1829             tmp2 = tcg_constant_i32(0xffffffff);
1830             tmp3 = tcg_constant_i32((insn & 1) << 5);
1831             break;
1832         default:
1833             g_assert_not_reached();
1834         }
1835         gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1836         gen_op_iwmmxt_movq_wRn_M0(wrd);
1837         gen_op_iwmmxt_set_mup();
1838         break;
1839     case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
1840         rd = (insn >> 12) & 0xf;
1841         wrd = (insn >> 16) & 0xf;
1842         if (rd == 15 || ((insn >> 22) & 3) == 3)
1843             return 1;
1844         gen_op_iwmmxt_movq_M0_wRn(wrd);
1845         tmp = tcg_temp_new_i32();
1846         switch ((insn >> 22) & 3) {
1847         case 0:
1848             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1849             tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1850             if (insn & 8) {
1851                 tcg_gen_ext8s_i32(tmp, tmp);
1852             } else {
1853                 tcg_gen_andi_i32(tmp, tmp, 0xff);
1854             }
1855             break;
1856         case 1:
1857             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1858             tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1859             if (insn & 8) {
1860                 tcg_gen_ext16s_i32(tmp, tmp);
1861             } else {
1862                 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1863             }
1864             break;
1865         case 2:
1866             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1867             tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1868             break;
1869         }
1870         store_reg(s, rd, tmp);
1871         break;
1872     case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
1873         if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1874             return 1;
1875         tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1876         switch ((insn >> 22) & 3) {
1877         case 0:
1878             tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1879             break;
1880         case 1:
1881             tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1882             break;
1883         case 2:
1884             tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1885             break;
1886         }
1887         tcg_gen_shli_i32(tmp, tmp, 28);
1888         gen_set_nzcv(tmp);
1889         break;
1890     case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
1891         if (((insn >> 6) & 3) == 3)
1892             return 1;
1893         rd = (insn >> 12) & 0xf;
1894         wrd = (insn >> 16) & 0xf;
1895         tmp = load_reg(s, rd);
1896         switch ((insn >> 6) & 3) {
1897         case 0:
1898             gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1899             break;
1900         case 1:
1901             gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1902             break;
1903         case 2:
1904             gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1905             break;
1906         }
1907         gen_op_iwmmxt_movq_wRn_M0(wrd);
1908         gen_op_iwmmxt_set_mup();
1909         break;
1910     case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
1911         if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1912             return 1;
1913         tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1914         tmp2 = tcg_temp_new_i32();
1915         tcg_gen_mov_i32(tmp2, tmp);
1916         switch ((insn >> 22) & 3) {
1917         case 0:
1918             for (i = 0; i < 7; i ++) {
1919                 tcg_gen_shli_i32(tmp2, tmp2, 4);
1920                 tcg_gen_and_i32(tmp, tmp, tmp2);
1921             }
1922             break;
1923         case 1:
1924             for (i = 0; i < 3; i ++) {
1925                 tcg_gen_shli_i32(tmp2, tmp2, 8);
1926                 tcg_gen_and_i32(tmp, tmp, tmp2);
1927             }
1928             break;
1929         case 2:
1930             tcg_gen_shli_i32(tmp2, tmp2, 16);
1931             tcg_gen_and_i32(tmp, tmp, tmp2);
1932             break;
1933         }
1934         gen_set_nzcv(tmp);
1935         break;
1936     case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
1937         wrd = (insn >> 12) & 0xf;
1938         rd0 = (insn >> 16) & 0xf;
1939         gen_op_iwmmxt_movq_M0_wRn(rd0);
1940         switch ((insn >> 22) & 3) {
1941         case 0:
1942             gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1943             break;
1944         case 1:
1945             gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1946             break;
1947         case 2:
1948             gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1949             break;
1950         case 3:
1951             return 1;
1952         }
1953         gen_op_iwmmxt_movq_wRn_M0(wrd);
1954         gen_op_iwmmxt_set_mup();
1955         break;
1956     case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
1957         if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1958             return 1;
1959         tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1960         tmp2 = tcg_temp_new_i32();
1961         tcg_gen_mov_i32(tmp2, tmp);
1962         switch ((insn >> 22) & 3) {
1963         case 0:
1964             for (i = 0; i < 7; i ++) {
1965                 tcg_gen_shli_i32(tmp2, tmp2, 4);
1966                 tcg_gen_or_i32(tmp, tmp, tmp2);
1967             }
1968             break;
1969         case 1:
1970             for (i = 0; i < 3; i ++) {
1971                 tcg_gen_shli_i32(tmp2, tmp2, 8);
1972                 tcg_gen_or_i32(tmp, tmp, tmp2);
1973             }
1974             break;
1975         case 2:
1976             tcg_gen_shli_i32(tmp2, tmp2, 16);
1977             tcg_gen_or_i32(tmp, tmp, tmp2);
1978             break;
1979         }
1980         gen_set_nzcv(tmp);
1981         break;
1982     case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
1983         rd = (insn >> 12) & 0xf;
1984         rd0 = (insn >> 16) & 0xf;
1985         if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1986             return 1;
1987         gen_op_iwmmxt_movq_M0_wRn(rd0);
1988         tmp = tcg_temp_new_i32();
1989         switch ((insn >> 22) & 3) {
1990         case 0:
1991             gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1992             break;
1993         case 1:
1994             gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1995             break;
1996         case 2:
1997             gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1998             break;
1999         }
2000         store_reg(s, rd, tmp);
2001         break;
2002     case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
2003     case 0x906: case 0xb06: case 0xd06: case 0xf06:
2004         wrd = (insn >> 12) & 0xf;
2005         rd0 = (insn >> 16) & 0xf;
2006         rd1 = (insn >> 0) & 0xf;
2007         gen_op_iwmmxt_movq_M0_wRn(rd0);
2008         switch ((insn >> 22) & 3) {
2009         case 0:
2010             if (insn & (1 << 21))
2011                 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2012             else
2013                 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2014             break;
2015         case 1:
2016             if (insn & (1 << 21))
2017                 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2018             else
2019                 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2020             break;
2021         case 2:
2022             if (insn & (1 << 21))
2023                 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2024             else
2025                 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2026             break;
2027         case 3:
2028             return 1;
2029         }
2030         gen_op_iwmmxt_movq_wRn_M0(wrd);
2031         gen_op_iwmmxt_set_mup();
2032         gen_op_iwmmxt_set_cup();
2033         break;
2034     case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
2035     case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2036         wrd = (insn >> 12) & 0xf;
2037         rd0 = (insn >> 16) & 0xf;
2038         gen_op_iwmmxt_movq_M0_wRn(rd0);
2039         switch ((insn >> 22) & 3) {
2040         case 0:
2041             if (insn & (1 << 21))
2042                 gen_op_iwmmxt_unpacklsb_M0();
2043             else
2044                 gen_op_iwmmxt_unpacklub_M0();
2045             break;
2046         case 1:
2047             if (insn & (1 << 21))
2048                 gen_op_iwmmxt_unpacklsw_M0();
2049             else
2050                 gen_op_iwmmxt_unpackluw_M0();
2051             break;
2052         case 2:
2053             if (insn & (1 << 21))
2054                 gen_op_iwmmxt_unpacklsl_M0();
2055             else
2056                 gen_op_iwmmxt_unpacklul_M0();
2057             break;
2058         case 3:
2059             return 1;
2060         }
2061         gen_op_iwmmxt_movq_wRn_M0(wrd);
2062         gen_op_iwmmxt_set_mup();
2063         gen_op_iwmmxt_set_cup();
2064         break;
2065     case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
2066     case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2067         wrd = (insn >> 12) & 0xf;
2068         rd0 = (insn >> 16) & 0xf;
2069         gen_op_iwmmxt_movq_M0_wRn(rd0);
2070         switch ((insn >> 22) & 3) {
2071         case 0:
2072             if (insn & (1 << 21))
2073                 gen_op_iwmmxt_unpackhsb_M0();
2074             else
2075                 gen_op_iwmmxt_unpackhub_M0();
2076             break;
2077         case 1:
2078             if (insn & (1 << 21))
2079                 gen_op_iwmmxt_unpackhsw_M0();
2080             else
2081                 gen_op_iwmmxt_unpackhuw_M0();
2082             break;
2083         case 2:
2084             if (insn & (1 << 21))
2085                 gen_op_iwmmxt_unpackhsl_M0();
2086             else
2087                 gen_op_iwmmxt_unpackhul_M0();
2088             break;
2089         case 3:
2090             return 1;
2091         }
2092         gen_op_iwmmxt_movq_wRn_M0(wrd);
2093         gen_op_iwmmxt_set_mup();
2094         gen_op_iwmmxt_set_cup();
2095         break;
2096     case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
2097     case 0x214: case 0x614: case 0xa14: case 0xe14:
2098         if (((insn >> 22) & 3) == 0)
2099             return 1;
2100         wrd = (insn >> 12) & 0xf;
2101         rd0 = (insn >> 16) & 0xf;
2102         gen_op_iwmmxt_movq_M0_wRn(rd0);
2103         tmp = tcg_temp_new_i32();
2104         if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2105             return 1;
2106         }
2107         switch ((insn >> 22) & 3) {
2108         case 1:
2109             gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
2110             break;
2111         case 2:
2112             gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
2113             break;
2114         case 3:
2115             gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
2116             break;
2117         }
2118         gen_op_iwmmxt_movq_wRn_M0(wrd);
2119         gen_op_iwmmxt_set_mup();
2120         gen_op_iwmmxt_set_cup();
2121         break;
2122     case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
2123     case 0x014: case 0x414: case 0x814: case 0xc14:
2124         if (((insn >> 22) & 3) == 0)
2125             return 1;
2126         wrd = (insn >> 12) & 0xf;
2127         rd0 = (insn >> 16) & 0xf;
2128         gen_op_iwmmxt_movq_M0_wRn(rd0);
2129         tmp = tcg_temp_new_i32();
2130         if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2131             return 1;
2132         }
2133         switch ((insn >> 22) & 3) {
2134         case 1:
2135             gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
2136             break;
2137         case 2:
2138             gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
2139             break;
2140         case 3:
2141             gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
2142             break;
2143         }
2144         gen_op_iwmmxt_movq_wRn_M0(wrd);
2145         gen_op_iwmmxt_set_mup();
2146         gen_op_iwmmxt_set_cup();
2147         break;
2148     case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
2149     case 0x114: case 0x514: case 0x914: case 0xd14:
2150         if (((insn >> 22) & 3) == 0)
2151             return 1;
2152         wrd = (insn >> 12) & 0xf;
2153         rd0 = (insn >> 16) & 0xf;
2154         gen_op_iwmmxt_movq_M0_wRn(rd0);
2155         tmp = tcg_temp_new_i32();
2156         if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2157             return 1;
2158         }
2159         switch ((insn >> 22) & 3) {
2160         case 1:
2161             gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
2162             break;
2163         case 2:
2164             gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
2165             break;
2166         case 3:
2167             gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
2168             break;
2169         }
2170         gen_op_iwmmxt_movq_wRn_M0(wrd);
2171         gen_op_iwmmxt_set_mup();
2172         gen_op_iwmmxt_set_cup();
2173         break;
2174     case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
2175     case 0x314: case 0x714: case 0xb14: case 0xf14:
2176         if (((insn >> 22) & 3) == 0)
2177             return 1;
2178         wrd = (insn >> 12) & 0xf;
2179         rd0 = (insn >> 16) & 0xf;
2180         gen_op_iwmmxt_movq_M0_wRn(rd0);
2181         tmp = tcg_temp_new_i32();
2182         switch ((insn >> 22) & 3) {
2183         case 1:
2184             if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2185                 return 1;
2186             }
2187             gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
2188             break;
2189         case 2:
2190             if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2191                 return 1;
2192             }
2193             gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
2194             break;
2195         case 3:
2196             if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2197                 return 1;
2198             }
2199             gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
2200             break;
2201         }
2202         gen_op_iwmmxt_movq_wRn_M0(wrd);
2203         gen_op_iwmmxt_set_mup();
2204         gen_op_iwmmxt_set_cup();
2205         break;
2206     case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
2207     case 0x916: case 0xb16: case 0xd16: case 0xf16:
2208         wrd = (insn >> 12) & 0xf;
2209         rd0 = (insn >> 16) & 0xf;
2210         rd1 = (insn >> 0) & 0xf;
2211         gen_op_iwmmxt_movq_M0_wRn(rd0);
2212         switch ((insn >> 22) & 3) {
2213         case 0:
2214             if (insn & (1 << 21))
2215                 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2216             else
2217                 gen_op_iwmmxt_minub_M0_wRn(rd1);
2218             break;
2219         case 1:
2220             if (insn & (1 << 21))
2221                 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2222             else
2223                 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2224             break;
2225         case 2:
2226             if (insn & (1 << 21))
2227                 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2228             else
2229                 gen_op_iwmmxt_minul_M0_wRn(rd1);
2230             break;
2231         case 3:
2232             return 1;
2233         }
2234         gen_op_iwmmxt_movq_wRn_M0(wrd);
2235         gen_op_iwmmxt_set_mup();
2236         break;
2237     case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
2238     case 0x816: case 0xa16: case 0xc16: case 0xe16:
2239         wrd = (insn >> 12) & 0xf;
2240         rd0 = (insn >> 16) & 0xf;
2241         rd1 = (insn >> 0) & 0xf;
2242         gen_op_iwmmxt_movq_M0_wRn(rd0);
2243         switch ((insn >> 22) & 3) {
2244         case 0:
2245             if (insn & (1 << 21))
2246                 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2247             else
2248                 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2249             break;
2250         case 1:
2251             if (insn & (1 << 21))
2252                 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2253             else
2254                 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2255             break;
2256         case 2:
2257             if (insn & (1 << 21))
2258                 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2259             else
2260                 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2261             break;
2262         case 3:
2263             return 1;
2264         }
2265         gen_op_iwmmxt_movq_wRn_M0(wrd);
2266         gen_op_iwmmxt_set_mup();
2267         break;
2268     case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
2269     case 0x402: case 0x502: case 0x602: case 0x702:
2270         wrd = (insn >> 12) & 0xf;
2271         rd0 = (insn >> 16) & 0xf;
2272         rd1 = (insn >> 0) & 0xf;
2273         gen_op_iwmmxt_movq_M0_wRn(rd0);
2274         iwmmxt_load_reg(cpu_V1, rd1);
2275         gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1,
2276                                 tcg_constant_i32((insn >> 20) & 3));
2277         gen_op_iwmmxt_movq_wRn_M0(wrd);
2278         gen_op_iwmmxt_set_mup();
2279         break;
2280     case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
2281     case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2282     case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2283     case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2284         wrd = (insn >> 12) & 0xf;
2285         rd0 = (insn >> 16) & 0xf;
2286         rd1 = (insn >> 0) & 0xf;
2287         gen_op_iwmmxt_movq_M0_wRn(rd0);
2288         switch ((insn >> 20) & 0xf) {
2289         case 0x0:
2290             gen_op_iwmmxt_subnb_M0_wRn(rd1);
2291             break;
2292         case 0x1:
2293             gen_op_iwmmxt_subub_M0_wRn(rd1);
2294             break;
2295         case 0x3:
2296             gen_op_iwmmxt_subsb_M0_wRn(rd1);
2297             break;
2298         case 0x4:
2299             gen_op_iwmmxt_subnw_M0_wRn(rd1);
2300             break;
2301         case 0x5:
2302             gen_op_iwmmxt_subuw_M0_wRn(rd1);
2303             break;
2304         case 0x7:
2305             gen_op_iwmmxt_subsw_M0_wRn(rd1);
2306             break;
2307         case 0x8:
2308             gen_op_iwmmxt_subnl_M0_wRn(rd1);
2309             break;
2310         case 0x9:
2311             gen_op_iwmmxt_subul_M0_wRn(rd1);
2312             break;
2313         case 0xb:
2314             gen_op_iwmmxt_subsl_M0_wRn(rd1);
2315             break;
2316         default:
2317             return 1;
2318         }
2319         gen_op_iwmmxt_movq_wRn_M0(wrd);
2320         gen_op_iwmmxt_set_mup();
2321         gen_op_iwmmxt_set_cup();
2322         break;
2323     case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
2324     case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2325     case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2326     case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2327         wrd = (insn >> 12) & 0xf;
2328         rd0 = (insn >> 16) & 0xf;
2329         gen_op_iwmmxt_movq_M0_wRn(rd0);
2330         tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2331         gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
2332         gen_op_iwmmxt_movq_wRn_M0(wrd);
2333         gen_op_iwmmxt_set_mup();
2334         gen_op_iwmmxt_set_cup();
2335         break;
2336     case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
2337     case 0x418: case 0x518: case 0x618: case 0x718:
2338     case 0x818: case 0x918: case 0xa18: case 0xb18:
2339     case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2340         wrd = (insn >> 12) & 0xf;
2341         rd0 = (insn >> 16) & 0xf;
2342         rd1 = (insn >> 0) & 0xf;
2343         gen_op_iwmmxt_movq_M0_wRn(rd0);
2344         switch ((insn >> 20) & 0xf) {
2345         case 0x0:
2346             gen_op_iwmmxt_addnb_M0_wRn(rd1);
2347             break;
2348         case 0x1:
2349             gen_op_iwmmxt_addub_M0_wRn(rd1);
2350             break;
2351         case 0x3:
2352             gen_op_iwmmxt_addsb_M0_wRn(rd1);
2353             break;
2354         case 0x4:
2355             gen_op_iwmmxt_addnw_M0_wRn(rd1);
2356             break;
2357         case 0x5:
2358             gen_op_iwmmxt_adduw_M0_wRn(rd1);
2359             break;
2360         case 0x7:
2361             gen_op_iwmmxt_addsw_M0_wRn(rd1);
2362             break;
2363         case 0x8:
2364             gen_op_iwmmxt_addnl_M0_wRn(rd1);
2365             break;
2366         case 0x9:
2367             gen_op_iwmmxt_addul_M0_wRn(rd1);
2368             break;
2369         case 0xb:
2370             gen_op_iwmmxt_addsl_M0_wRn(rd1);
2371             break;
2372         default:
2373             return 1;
2374         }
2375         gen_op_iwmmxt_movq_wRn_M0(wrd);
2376         gen_op_iwmmxt_set_mup();
2377         gen_op_iwmmxt_set_cup();
2378         break;
2379     case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
2380     case 0x408: case 0x508: case 0x608: case 0x708:
2381     case 0x808: case 0x908: case 0xa08: case 0xb08:
2382     case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2383         if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2384             return 1;
2385         wrd = (insn >> 12) & 0xf;
2386         rd0 = (insn >> 16) & 0xf;
2387         rd1 = (insn >> 0) & 0xf;
2388         gen_op_iwmmxt_movq_M0_wRn(rd0);
2389         switch ((insn >> 22) & 3) {
2390         case 1:
2391             if (insn & (1 << 21))
2392                 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2393             else
2394                 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2395             break;
2396         case 2:
2397             if (insn & (1 << 21))
2398                 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2399             else
2400                 gen_op_iwmmxt_packul_M0_wRn(rd1);
2401             break;
2402         case 3:
2403             if (insn & (1 << 21))
2404                 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2405             else
2406                 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2407             break;
2408         }
2409         gen_op_iwmmxt_movq_wRn_M0(wrd);
2410         gen_op_iwmmxt_set_mup();
2411         gen_op_iwmmxt_set_cup();
2412         break;
2413     case 0x201: case 0x203: case 0x205: case 0x207:
2414     case 0x209: case 0x20b: case 0x20d: case 0x20f:
2415     case 0x211: case 0x213: case 0x215: case 0x217:
2416     case 0x219: case 0x21b: case 0x21d: case 0x21f:
2417         wrd = (insn >> 5) & 0xf;
2418         rd0 = (insn >> 12) & 0xf;
2419         rd1 = (insn >> 0) & 0xf;
2420         if (rd0 == 0xf || rd1 == 0xf)
2421             return 1;
2422         gen_op_iwmmxt_movq_M0_wRn(wrd);
2423         tmp = load_reg(s, rd0);
2424         tmp2 = load_reg(s, rd1);
2425         switch ((insn >> 16) & 0xf) {
2426         case 0x0:                                       /* TMIA */
2427             gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2428             break;
2429         case 0x8:                                       /* TMIAPH */
2430             gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2431             break;
2432         case 0xc: case 0xd: case 0xe: case 0xf:                 /* TMIAxy */
2433             if (insn & (1 << 16))
2434                 tcg_gen_shri_i32(tmp, tmp, 16);
2435             if (insn & (1 << 17))
2436                 tcg_gen_shri_i32(tmp2, tmp2, 16);
2437             gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2438             break;
2439         default:
2440             return 1;
2441         }
2442         gen_op_iwmmxt_movq_wRn_M0(wrd);
2443         gen_op_iwmmxt_set_mup();
2444         break;
2445     default:
2446         return 1;
2447     }
2448 
2449     return 0;
2450 }
2451 
2452 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2453    (i.e. an undefined instruction).  */
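/*
 * Modelling note (an inference from the code below, not stated in the
 * original comments): the XScale internal accumulator acc0 appears to live
 * in the same register file as the iwMMXt wR registers, which is why the
 * MIA, MAR and MRA cases reuse iwmmxt_load_reg(), iwmmxt_store_reg() and
 * gen_op_iwmmxt_movq_wRn_M0() with the accumulator index.
 */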
2454 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2455 {
2456     int acc, rd0, rd1, rdhi, rdlo;
2457     TCGv_i32 tmp, tmp2;
2458 
2459     if ((insn & 0x0ff00f10) == 0x0e200010) {
2460         /* Multiply with Internal Accumulate Format */
2461         rd0 = (insn >> 12) & 0xf;
2462         rd1 = insn & 0xf;
2463         acc = (insn >> 5) & 7;
2464 
2465         if (acc != 0)
2466             return 1;
2467 
2468         tmp = load_reg(s, rd0);
2469         tmp2 = load_reg(s, rd1);
2470         switch ((insn >> 16) & 0xf) {
2471         case 0x0:                                       /* MIA */
2472             gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2473             break;
2474         case 0x8:                                       /* MIAPH */
2475             gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2476             break;
2477         case 0xc:                                       /* MIABB */
2478         case 0xd:                                       /* MIABT */
2479         case 0xe:                                       /* MIATB */
2480         case 0xf:                                       /* MIATT */
2481             if (insn & (1 << 16))
2482                 tcg_gen_shri_i32(tmp, tmp, 16);
2483             if (insn & (1 << 17))
2484                 tcg_gen_shri_i32(tmp2, tmp2, 16);
2485             gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2486             break;
2487         default:
2488             return 1;
2489         }
2490 
2491         gen_op_iwmmxt_movq_wRn_M0(acc);
2492         return 0;
2493     }
2494 
2495     if ((insn & 0x0fe00ff8) == 0x0c400000) {
2496         /* Internal Accumulator Access Format */
2497         rdhi = (insn >> 16) & 0xf;
2498         rdlo = (insn >> 12) & 0xf;
2499         acc = insn & 7;
2500 
2501         if (acc != 0)
2502             return 1;
2503 
2504         if (insn & ARM_CP_RW_BIT) {                     /* MRA */
2505             iwmmxt_load_reg(cpu_V0, acc);
2506             tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2507             tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2508             tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2509         } else {                                        /* MAR */
2510             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2511             iwmmxt_store_reg(cpu_V0, acc);
2512         }
2513         return 0;
2514     }
2515 
2516     return 1;
2517 }
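/*
 * Worked example for the MRA masking above (illustrative): the internal
 * accumulator is 40 bits wide, so after splitting it into rdlo/rdhi the
 * high word is masked with (1 << (40 - 32)) - 1 == 0xff, leaving only
 * accumulator bits [39:32] in rdhi.
 */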
2518 
2519 static void gen_goto_ptr(void)
2520 {
2521     tcg_gen_lookup_and_goto_ptr();
2522 }
2523 
2524 /* This will end the TB but doesn't guarantee we'll return to
2525  * cpu_loop_exec. Any live exit_requests will be processed as we
2526  * enter the next TB.
2527  */
2528 static void gen_goto_tb(DisasContext *s, int n, target_long diff)
2529 {
2530     if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
2531         /*
2532          * For pcrel, the pc must always be up-to-date on entry to
2533          * the linked TB, so that it can use simple additions for all
2534          * further adjustments.  For !pcrel, the linked TB is compiled
2535          * to know its full virtual address, so we can delay the
2536          * update to pc to the unlinked path.  A long chain of links
2537          * can thus avoid many updates to the PC.
2538          */
2539         if (tb_cflags(s->base.tb) & CF_PCREL) {
2540             gen_update_pc(s, diff);
2541             tcg_gen_goto_tb(n);
2542         } else {
2543             tcg_gen_goto_tb(n);
2544             gen_update_pc(s, diff);
2545         }
2546         tcg_gen_exit_tb(s->base.tb, n);
2547     } else {
2548         gen_update_pc(s, diff);
2549         gen_goto_ptr();
2550     }
2551     s->base.is_jmp = DISAS_NORETURN;
2552 }
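/*
 * Usage sketch (illustrative; 'cond_flag', 'taken_diff' and 'not_taken_diff'
 * are placeholder names, not identifiers from this file): 'n' picks one of
 * the TB's two goto_tb chaining slots, so a conditional branch can chain
 * both outcomes:
 *
 *     TCGLabel *skip = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, cond_flag, 0, skip);
 *     gen_goto_tb(s, 0, taken_diff);
 *     gen_set_label(skip);
 *     gen_goto_tb(s, 1, not_taken_diff);
 */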
2553 
2554 /* Jump, specifying which TB number to use if we gen_goto_tb() */
2555 static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
2556 {
2557     if (unlikely(s->ss_active)) {
2558         /* An indirect jump so that we still trigger the debug exception.  */
2559         gen_update_pc(s, diff);
2560         s->base.is_jmp = DISAS_JUMP;
2561         return;
2562     }
2563     switch (s->base.is_jmp) {
2564     case DISAS_NEXT:
2565     case DISAS_TOO_MANY:
2566     case DISAS_NORETURN:
2567         /*
2568          * The normal case: just go to the destination TB.
2569          * NB: NORETURN happens if we generate code like
2570          *    gen_brcondi(l);
2571          *    gen_jmp();
2572          *    gen_set_label(l);
2573          *    gen_jmp();
2574          * on the second call to gen_jmp().
2575          */
2576         gen_goto_tb(s, tbno, diff);
2577         break;
2578     case DISAS_UPDATE_NOCHAIN:
2579     case DISAS_UPDATE_EXIT:
2580         /*
2581          * We already decided we're leaving the TB for some other reason.
2582          * Avoid using goto_tb so we really do exit back to the main loop
2583          * and don't chain to another TB.
2584          */
2585         gen_update_pc(s, diff);
2586         gen_goto_ptr();
2587         s->base.is_jmp = DISAS_NORETURN;
2588         break;
2589     default:
2590         /*
2591          * We shouldn't be emitting code for a jump and also have
2592          * is_jmp set to one of the special cases like DISAS_SWI.
2593          */
2594         g_assert_not_reached();
2595     }
2596 }
2597 
2598 static inline void gen_jmp(DisasContext *s, target_long diff)
2599 {
2600     gen_jmp_tb(s, diff, 0);
2601 }
2602 
2603 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2604 {
2605     if (x)
2606         tcg_gen_sari_i32(t0, t0, 16);
2607     else
2608         gen_sxth(t0);
2609     if (y)
2610         tcg_gen_sari_i32(t1, t1, 16);
2611     else
2612         gen_sxth(t1);
2613     tcg_gen_mul_i32(t0, t0, t1);
2614 }
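/*
 * gen_mulxy() implements the 16x16->32 signed multiply used by the
 * SMULxy/SMLAxy family: x and y select the top (1) or bottom (0) halfword
 * of t0 and t1 respectively, sign-extending before the multiply.  For
 * example, x = 1, y = 0 multiplies the top half of t0 by the bottom half
 * of t1.
 */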
2615 
2616 /* Return the mask of PSR bits set by a MSR instruction.  */
2617 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2618 {
2619     uint32_t mask = 0;
2620 
2621     if (flags & (1 << 0)) {
2622         mask |= 0xff;
2623     }
2624     if (flags & (1 << 1)) {
2625         mask |= 0xff00;
2626     }
2627     if (flags & (1 << 2)) {
2628         mask |= 0xff0000;
2629     }
2630     if (flags & (1 << 3)) {
2631         mask |= 0xff000000;
2632     }
2633 
2634     /* Mask out undefined and reserved bits.  */
2635     mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2636 
2637     /* Mask out execution state.  */
2638     if (!spsr) {
2639         mask &= ~CPSR_EXEC;
2640     }
2641 
2642     /* Mask out privileged bits.  */
2643     if (IS_USER(s)) {
2644         mask &= CPSR_USER;
2645     }
2646     return mask;
2647 }
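/*
 * Worked example (illustrative): 'flags' is the 4-bit field mask from the
 * MSR instruction, with bit 0 = c (PSR[7:0]), bit 1 = x (PSR[15:8]),
 * bit 2 = s (PSR[23:16]) and bit 3 = f (PSR[31:24]).  So flags == 0x9
 * ("f" and "c") starts from mask 0xff0000ff before the valid-bit,
 * execution-state and privilege filtering above is applied.
 */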
2648 
2649 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2650 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2651 {
2652     TCGv_i32 tmp;
2653     if (spsr) {
2654         /* ??? This is also undefined in system mode.  */
2655         if (IS_USER(s))
2656             return 1;
2657 
2658         tmp = load_cpu_field(spsr);
2659         tcg_gen_andi_i32(tmp, tmp, ~mask);
2660         tcg_gen_andi_i32(t0, t0, mask);
2661         tcg_gen_or_i32(tmp, tmp, t0);
2662         store_cpu_field(tmp, spsr);
2663     } else {
2664         gen_set_cpsr(t0, mask);
2665     }
2666     gen_lookup_tb(s);
2667     return 0;
2668 }
2669 
2670 /* Returns nonzero if access to the PSR is not permitted.  */
2671 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2672 {
2673     TCGv_i32 tmp;
2674     tmp = tcg_temp_new_i32();
2675     tcg_gen_movi_i32(tmp, val);
2676     return gen_set_psr(s, mask, spsr, tmp);
2677 }
2678 
2679 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2680                                      int *tgtmode, int *regno)
2681 {
2682     /* Decode the r and sysm fields of MSR/MRS banked accesses into
2683      * the target mode and register number, and identify the various
2684      * unpredictable cases.
2685      * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2686      *  + executed in user mode
2687      *  + using R15 as the src/dest register
2688      *  + accessing an unimplemented register
2689      *  + accessing a register that's inaccessible at current PL/security state*
2690      *  + accessing a register that you could access with a different insn
2691      * We choose to UNDEF in all these cases.
2692      * Since we don't know which of the various AArch32 modes we are in
2693      * we have to defer some checks to runtime.
2694      * Accesses to Monitor mode registers from Secure EL1 (which implies
2695      * that EL3 is AArch64) must trap to EL3.
2696      *
2697      * If the access checks fail this function will emit code to take
2698      * an exception and return false. Otherwise it will return true,
2699      * and set *tgtmode and *regno appropriately.
2700      */
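    /*
     * Examples (derived from the decode tables below): r == 1, sysm == 0x10
     * selects SPSR_irq (tgtmode ARM_CPU_MODE_IRQ, regno 16); r == 0,
     * sysm == 0x8 selects r8_fiq (tgtmode ARM_CPU_MODE_FIQ, regno 8).
     */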
2701     /* These instructions are present only in ARMv8, or in ARMv7 with the
2702      * Virtualization Extensions.
2703      */
2704     if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2705         !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2706         goto undef;
2707     }
2708 
2709     if (IS_USER(s) || rn == 15) {
2710         goto undef;
2711     }
2712 
2713     /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2714      * of registers into (r, sysm).
2715      */
2716     if (r) {
2717         /* SPSRs for other modes */
2718         switch (sysm) {
2719         case 0xe: /* SPSR_fiq */
2720             *tgtmode = ARM_CPU_MODE_FIQ;
2721             break;
2722         case 0x10: /* SPSR_irq */
2723             *tgtmode = ARM_CPU_MODE_IRQ;
2724             break;
2725         case 0x12: /* SPSR_svc */
2726             *tgtmode = ARM_CPU_MODE_SVC;
2727             break;
2728         case 0x14: /* SPSR_abt */
2729             *tgtmode = ARM_CPU_MODE_ABT;
2730             break;
2731         case 0x16: /* SPSR_und */
2732             *tgtmode = ARM_CPU_MODE_UND;
2733             break;
2734         case 0x1c: /* SPSR_mon */
2735             *tgtmode = ARM_CPU_MODE_MON;
2736             break;
2737         case 0x1e: /* SPSR_hyp */
2738             *tgtmode = ARM_CPU_MODE_HYP;
2739             break;
2740         default: /* unallocated */
2741             goto undef;
2742         }
2743         /* We arbitrarily assign SPSR a register number of 16. */
2744         *regno = 16;
2745     } else {
2746         /* general purpose registers for other modes */
2747         switch (sysm) {
2748         case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
2749             *tgtmode = ARM_CPU_MODE_USR;
2750             *regno = sysm + 8;
2751             break;
2752         case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
2753             *tgtmode = ARM_CPU_MODE_FIQ;
2754             *regno = sysm;
2755             break;
2756         case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2757             *tgtmode = ARM_CPU_MODE_IRQ;
2758             *regno = sysm & 1 ? 13 : 14;
2759             break;
2760         case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2761             *tgtmode = ARM_CPU_MODE_SVC;
2762             *regno = sysm & 1 ? 13 : 14;
2763             break;
2764         case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2765             *tgtmode = ARM_CPU_MODE_ABT;
2766             *regno = sysm & 1 ? 13 : 14;
2767             break;
2768         case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2769             *tgtmode = ARM_CPU_MODE_UND;
2770             *regno = sysm & 1 ? 13 : 14;
2771             break;
2772         case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2773             *tgtmode = ARM_CPU_MODE_MON;
2774             *regno = sysm & 1 ? 13 : 14;
2775             break;
2776         case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2777             *tgtmode = ARM_CPU_MODE_HYP;
2778             /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2779             *regno = sysm & 1 ? 13 : 17;
2780             break;
2781         default: /* unallocated */
2782             goto undef;
2783         }
2784     }
2785 
2786     /* Catch the 'accessing inaccessible register' cases we can detect
2787      * at translate time.
2788      */
2789     switch (*tgtmode) {
2790     case ARM_CPU_MODE_MON:
2791         if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2792             goto undef;
2793         }
2794         if (s->current_el == 1) {
2795             /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2796              * then accesses to Mon registers trap to Secure EL2, if it exists,
2797              * otherwise EL3.
2798              */
2799             TCGv_i32 tcg_el;
2800 
2801             if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
2802                 dc_isar_feature(aa64_sel2, s)) {
2803                 /* Target EL is EL<3 minus SCR_EL3.EEL2> */
2804                 tcg_el = load_cpu_field_low32(cp15.scr_el3);
2805                 tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
2806                 tcg_gen_addi_i32(tcg_el, tcg_el, 3);
2807             } else {
2808                 tcg_el = tcg_constant_i32(3);
2809             }
2810 
2811             gen_exception_insn_el_v(s, 0, EXCP_UDEF,
2812                                     syn_uncategorized(), tcg_el);
2813             return false;
2814         }
2815         break;
2816     case ARM_CPU_MODE_HYP:
2817         /*
2818          * r13_hyp can only be accessed from Monitor mode, and so we
2819          * can forbid accesses from EL2 or below.
2820          * elr_hyp can be accessed also from Hyp mode, so forbid
2821          * accesses from EL0 or EL1.
2822          * SPSR_hyp is supposed to be in the same category as r13_hyp
2823          * and UNPREDICTABLE if accessed from anything except Monitor
2824          * mode. However there is some real-world code that will do
2825          * it because at least some hardware happens to permit the
2826          * access. (Notably a standard Cortex-R52 startup code fragment
2827          * does this.) So we permit SPSR_hyp from Hyp mode also, to allow
2828          * this (incorrect) guest code to run.
2829          */
2830         if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2
2831             || (s->current_el < 3 && *regno != 16 && *regno != 17)) {
2832             goto undef;
2833         }
2834         break;
2835     default:
2836         break;
2837     }
2838 
2839     return true;
2840 
2841 undef:
2842     /* If we get here then some access check did not pass */
2843     gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
2844     return false;
2845 }
2846 
2847 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2848 {
2849     TCGv_i32 tcg_reg;
2850     int tgtmode = 0, regno = 0;
2851 
2852     if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2853         return;
2854     }
2855 
2856     /* Sync state because msr_banked() can raise exceptions */
2857     gen_set_condexec(s);
2858     gen_update_pc(s, 0);
2859     tcg_reg = load_reg(s, rn);
2860     gen_helper_msr_banked(tcg_env, tcg_reg,
2861                           tcg_constant_i32(tgtmode),
2862                           tcg_constant_i32(regno));
2863     s->base.is_jmp = DISAS_UPDATE_EXIT;
2864 }
2865 
2866 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2867 {
2868     TCGv_i32 tcg_reg;
2869     int tgtmode = 0, regno = 0;
2870 
2871     if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2872         return;
2873     }
2874 
2875     /* Sync state because mrs_banked() can raise exceptions */
2876     gen_set_condexec(s);
2877     gen_update_pc(s, 0);
2878     tcg_reg = tcg_temp_new_i32();
2879     gen_helper_mrs_banked(tcg_reg, tcg_env,
2880                           tcg_constant_i32(tgtmode),
2881                           tcg_constant_i32(regno));
2882     store_reg(s, rn, tcg_reg);
2883     s->base.is_jmp = DISAS_UPDATE_EXIT;
2884 }
2885 
2886 /* Store value to PC as for an exception return (i.e. don't
2887  * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2888  * will do the masking based on the new value of the Thumb bit.
2889  */
2890 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2891 {
2892     tcg_gen_mov_i32(cpu_R[15], pc);
2893 }
2894 
2895 /* Generate a v6 exception return.  Marks both values as dead.  */
2896 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2897 {
2898     store_pc_exc_ret(s, pc);
2899     /* The cpsr_write_eret helper will mask the low bits of PC
2900      * appropriately depending on the new Thumb bit, so it must
2901      * be called after storing the new PC.
2902      */
2903     translator_io_start(&s->base);
2904     gen_helper_cpsr_write_eret(tcg_env, cpsr);
2905     /* Must exit loop to check un-masked IRQs */
2906     s->base.is_jmp = DISAS_EXIT;
2907 }
2908 
2909 /* Generate an old-style exception return. Marks pc as dead. */
2910 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2911 {
2912     gen_rfe(s, pc, load_cpu_field(spsr));
2913 }
2914 
2915 static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
2916 {
2917     static const uint16_t mask[3] = {
2918         0b0000000111100111,  /* crn ==  9, crm == {c0-c2, c5-c8}   */
2919         0b0000000100010011,  /* crn == 10, crm == {c0, c1, c4, c8} */
2920         0b1000000111111111,  /* crn == 11, crm == {c0-c8, c15}     */
2921     };
2922 
2923     if (crn >= 9 && crn <= 11) {
2924         return (mask[crn - 9] >> crm) & 1;
2925     }
2926     return false;
2927 }
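/*
 * Worked example (illustrative): for crn == 9, crm == 5, bit 5 of mask[0]
 * (0b0000000111100111) is set, so the encoding is in the IMPLEMENTATION
 * DEFINED space; for crn == 10, crm == 2, bit 2 of mask[1]
 * (0b0000000100010011) is clear and the function returns false.
 */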
2928 
2929 static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
2930                            int opc1, int crn, int crm, int opc2,
2931                            bool isread, int rt, int rt2)
2932 {
2933     uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
2934     const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2935     TCGv_ptr tcg_ri = NULL;
2936     bool need_exit_tb = false;
2937     uint32_t syndrome;
2938 
2939     /*
2940      * Note that since we are an implementation which takes an
2941      * exception on a trapped conditional instruction only if the
2942      * instruction passes its condition code check, we can take
2943      * advantage of the clause in the ARM ARM that allows us to set
2944      * the COND field in the instruction to 0xE in all cases.
2945      * We could fish the actual condition out of the insn (ARM)
2946      * or the condexec bits (Thumb) but it isn't necessary.
2947      */
2948     switch (cpnum) {
2949     case 14:
2950         if (is64) {
2951             syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2952                                          isread, false);
2953         } else {
2954             syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2955                                         rt, isread, false);
2956         }
2957         break;
2958     case 15:
2959         if (is64) {
2960             syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2961                                          isread, false);
2962         } else {
2963             syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2964                                         rt, isread, false);
2965         }
2966         break;
2967     default:
2968         /*
2969          * ARMv8 defines that only coprocessors 14 and 15 exist,
2970          * so this can only happen if this is an ARMv7 or earlier CPU,
2971          * in which case the syndrome information won't actually be
2972          * guest visible.
2973          */
2974         assert(!arm_dc_feature(s, ARM_FEATURE_V8));
2975         syndrome = syn_uncategorized();
2976         break;
2977     }
2978 
2979     if (s->hstr_active && cpnum == 15 && s->current_el == 1) {
2980         /*
2981          * At EL1, check for a HSTR_EL2 trap, which must take precedence
2982          * over the UNDEF for "no such register" or the UNDEF for "access
2983          * permissions forbid this EL1 access". HSTR_EL2 traps from EL0
2984          * only happen if the cpreg doesn't UNDEF at EL0, so we do those in
2985          * access_check_cp_reg(), after the checks for whether the access
2986          * is configurably trapped to EL1.
2987          */
2988         uint32_t maskbit = is64 ? crm : crn;
2989 
2990         if (maskbit != 4 && maskbit != 14) {
2991             /* T4 and T14 are RES0 so never cause traps */
2992             TCGv_i32 t;
2993             DisasLabel over = gen_disas_label(s);
2994 
2995             t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
2996             tcg_gen_andi_i32(t, t, 1u << maskbit);
2997             tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
2998 
2999             gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
3000             /*
3001              * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
3002              * but since we're conditionally branching over it, we want
3003              * to assume continue-to-next-instruction.
3004              */
3005             s->base.is_jmp = DISAS_NEXT;
3006             set_disas_label(s, over);
3007         }
3008     }
3009 
3010     if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
3011         /*
3012          * Check for TIDCP trap, which must take precedence over the UNDEF
3013          * for "no such register" etc.  It shares precedence with HSTR,
3014          * but raises the same exception, so order doesn't matter.
3015          */
3016         switch (s->current_el) {
3017         case 0:
3018             if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
3019                 && dc_isar_feature(aa64_tidcp1, s)) {
3020                 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
3021             }
3022             break;
3023         case 1:
3024             gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
3025             break;
3026         }
3027     }
3028 
3029     if (!ri) {
3030         /*
3031          * Unknown register; this might be a guest error or a QEMU
3032          * unimplemented feature.
3033          */
3034         if (is64) {
3035             qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3036                           "64 bit system register cp:%d opc1: %d crm:%d "
3037                           "(%s)\n",
3038                           isread ? "read" : "write", cpnum, opc1, crm,
3039                           s->ns ? "non-secure" : "secure");
3040         } else {
3041             qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3042                           "system register cp:%d opc1:%d crn:%d crm:%d "
3043                           "opc2:%d (%s)\n",
3044                           isread ? "read" : "write", cpnum, opc1, crn,
3045                           crm, opc2, s->ns ? "non-secure" : "secure");
3046         }
3047         unallocated_encoding(s);
3048         return;
3049     }
3050 
3051     /* Check access permissions */
3052     if (!cp_access_ok(s->current_el, ri, isread)) {
3053         unallocated_encoding(s);
3054         return;
3055     }
3056 
3057     if ((s->hstr_active && s->current_el == 0) || ri->accessfn ||
3058         (ri->fgt && s->fgt_active) ||
3059         (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
3060         /*
3061          * Emit code to perform further access permissions checks at
3062          * runtime; this may result in an exception.
3063      * Note that on XScale all cp0..cp13 registers do an access check
3064          * call in order to handle c15_cpar.
3065          */
3066         gen_set_condexec(s);
3067         gen_update_pc(s, 0);
3068         tcg_ri = tcg_temp_new_ptr();
3069         gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
3070                                        tcg_constant_i32(key),
3071                                        tcg_constant_i32(syndrome),
3072                                        tcg_constant_i32(isread));
3073     } else if (ri->type & ARM_CP_RAISES_EXC) {
3074         /*
3075          * The readfn or writefn might raise an exception;
3076          * synchronize the CPU state in case it does.
3077          */
3078         gen_set_condexec(s);
3079         gen_update_pc(s, 0);
3080     }
3081 
3082     /* Handle special cases first */
3083     switch (ri->type & ARM_CP_SPECIAL_MASK) {
3084     case 0:
3085         break;
3086     case ARM_CP_NOP:
3087         return;
3088     case ARM_CP_WFI:
3089         if (isread) {
3090             unallocated_encoding(s);
3091         } else {
3092             gen_update_pc(s, curr_insn_len(s));
3093             s->base.is_jmp = DISAS_WFI;
3094         }
3095         return;
3096     default:
3097         g_assert_not_reached();
3098     }
3099 
3100     if (ri->type & ARM_CP_IO) {
3101         /* I/O operations must end the TB here (whether read or write) */
3102         need_exit_tb = translator_io_start(&s->base);
3103     }
3104 
3105     if (isread) {
3106         /* Read */
3107         if (is64) {
3108             TCGv_i64 tmp64;
3109             TCGv_i32 tmp;
3110             if (ri->type & ARM_CP_CONST) {
3111                 tmp64 = tcg_constant_i64(ri->resetvalue);
3112             } else if (ri->readfn) {
3113                 if (!tcg_ri) {
3114                     tcg_ri = gen_lookup_cp_reg(key);
3115                 }
3116                 tmp64 = tcg_temp_new_i64();
3117                 gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
3118             } else {
3119                 tmp64 = tcg_temp_new_i64();
3120                 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
3121             }
3122             tmp = tcg_temp_new_i32();
3123             tcg_gen_extrl_i64_i32(tmp, tmp64);
3124             store_reg(s, rt, tmp);
3125             tmp = tcg_temp_new_i32();
3126             tcg_gen_extrh_i64_i32(tmp, tmp64);
3127             store_reg(s, rt2, tmp);
3128         } else {
3129             TCGv_i32 tmp;
3130             if (ri->type & ARM_CP_CONST) {
3131                 tmp = tcg_constant_i32(ri->resetvalue);
3132             } else if (ri->readfn) {
3133                 if (!tcg_ri) {
3134                     tcg_ri = gen_lookup_cp_reg(key);
3135                 }
3136                 tmp = tcg_temp_new_i32();
3137                 gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
3138             } else {
3139                 tmp = load_cpu_offset(ri->fieldoffset);
3140             }
3141             if (rt == 15) {
3142                 /* A destination register of r15 for 32-bit loads sets
3143                  * the condition codes from the high 4 bits of the value.
3144                  */
3145                 gen_set_nzcv(tmp);
3146             } else {
3147                 store_reg(s, rt, tmp);
3148             }
3149         }
3150     } else {
3151         /* Write */
3152         if (ri->type & ARM_CP_CONST) {
3153             /* If not forbidden by access permissions, treat as WI */
3154             return;
3155         }
3156 
3157         if (is64) {
3158             TCGv_i32 tmplo, tmphi;
3159             TCGv_i64 tmp64 = tcg_temp_new_i64();
3160             tmplo = load_reg(s, rt);
3161             tmphi = load_reg(s, rt2);
3162             tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
3163             if (ri->writefn) {
3164                 if (!tcg_ri) {
3165                     tcg_ri = gen_lookup_cp_reg(key);
3166                 }
3167                 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
3168             } else {
3169                 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
3170             }
3171         } else {
3172             TCGv_i32 tmp = load_reg(s, rt);
3173             if (ri->writefn) {
3174                 if (!tcg_ri) {
3175                     tcg_ri = gen_lookup_cp_reg(key);
3176                 }
3177                 gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
3178             } else {
3179                 store_cpu_offset(tmp, ri->fieldoffset, 4);
3180             }
3181         }
3182     }
3183 
3184     if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
3185         /*
3186          * A write to any coprocessor register that ends a TB
3187          * must rebuild the hflags for the next TB.
3188          */
3189         gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
3190         /*
3191          * We default to ending the TB on a coprocessor register write,
3192          * but allow this to be suppressed by the register definition
3193          * (usually only necessary to work around guest bugs).
3194          */
3195         need_exit_tb = true;
3196     }
3197     if (need_exit_tb) {
3198         gen_lookup_tb(s);
3199     }
3200 }
3201 
3202 /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
3203 static void disas_xscale_insn(DisasContext *s, uint32_t insn)
3204 {
3205     int cpnum = (insn >> 8) & 0xf;
3206 
3207     if (extract32(s->c15_cpar, cpnum, 1) == 0) {
3208         unallocated_encoding(s);
3209     } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
3210         if (disas_iwmmxt_insn(s, insn)) {
3211             unallocated_encoding(s);
3212         }
3213     } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
3214         if (disas_dsp_insn(s, insn)) {
3215             unallocated_encoding(s);
3216         }
3217     }
3218 }
3219 
3220 /* Store a 64-bit value to a register pair.  Clobbers val.  */
3221 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
3222 {
3223     TCGv_i32 tmp;
3224     tmp = tcg_temp_new_i32();
3225     tcg_gen_extrl_i64_i32(tmp, val);
3226     store_reg(s, rlow, tmp);
3227     tmp = tcg_temp_new_i32();
3228     tcg_gen_extrh_i64_i32(tmp, val);
3229     store_reg(s, rhigh, tmp);
3230 }
3231 
3232 /* Load and add a 64-bit value from a register pair.  */
3233 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
3234 {
3235     TCGv_i64 tmp;
3236     TCGv_i32 tmpl;
3237     TCGv_i32 tmph;
3238 
3239     /* Load 64-bit value rd:rn.  */
3240     tmpl = load_reg(s, rlow);
3241     tmph = load_reg(s, rhigh);
3242     tmp = tcg_temp_new_i64();
3243     tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
3244     tcg_gen_add_i64(val, val, tmp);
3245 }
3246 
3247 /* Set N and Z flags from hi|lo.  */
3248 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
3249 {
3250     tcg_gen_mov_i32(cpu_NF, hi);
3251     tcg_gen_or_i32(cpu_ZF, lo, hi);
3252 }
3253 
3254 /* Load/Store exclusive instructions are implemented by remembering
3255    the value/address loaded, and seeing if these are the same
3256    when the store is performed.  This should be sufficient to implement
3257    the architecturally mandated semantics, and avoids having to monitor
3258    regular stores.  The compare vs the remembered value is done during
3259    the cmpxchg operation, but we must compare the addresses manually.  */
3260 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
3261                                TCGv_i32 addr, int size)
3262 {
3263     TCGv_i32 tmp = tcg_temp_new_i32();
3264     MemOp opc = size | MO_ALIGN | s->be_data;
3265 
3266     s->is_ldex = true;
3267 
3268     if (size == 3) {
3269         TCGv_i32 tmp2 = tcg_temp_new_i32();
3270         TCGv_i64 t64 = tcg_temp_new_i64();
3271 
3272         /*
3273          * For AArch32, architecturally the 32-bit word at the lowest
3274          * address is always Rt and the one at addr+4 is Rt2, even if
3275          * the CPU is big-endian. That means we don't want to do a
3276          * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
3277          * architecturally 64-bit access, but instead do a 64-bit access
3278          * using MO_BE if appropriate and then split the two halves.
3279          */
3280         TCGv taddr = gen_aa32_addr(s, addr, opc);
3281 
3282         tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
3283         tcg_gen_mov_i64(cpu_exclusive_val, t64);
3284         if (s->be_data == MO_BE) {
3285             tcg_gen_extr_i64_i32(tmp2, tmp, t64);
3286         } else {
3287             tcg_gen_extr_i64_i32(tmp, tmp2, t64);
3288         }
3289         store_reg(s, rt2, tmp2);
3290     } else {
3291         gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
3292         tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
3293     }
3294 
3295     store_reg(s, rt, tmp);
3296     tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
3297 }
3298 
3299 static void gen_clrex(DisasContext *s)
3300 {
3301     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3302 }
3303 
3304 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
3305                                 TCGv_i32 addr, int size)
3306 {
3307     TCGv_i32 t0, t1, t2;
3308     TCGv_i64 extaddr;
3309     TCGv taddr;
3310     TCGLabel *done_label;
3311     TCGLabel *fail_label;
3312     MemOp opc = size | MO_ALIGN | s->be_data;
3313 
3314     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
3315          [addr] = {Rt};
3316          {Rd} = 0;
3317        } else {
3318          {Rd} = 1;
3319        } */
3320     fail_label = gen_new_label();
3321     done_label = gen_new_label();
3322     extaddr = tcg_temp_new_i64();
3323     tcg_gen_extu_i32_i64(extaddr, addr);
3324     tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
3325 
3326     taddr = gen_aa32_addr(s, addr, opc);
3327     t0 = tcg_temp_new_i32();
3328     t1 = load_reg(s, rt);
3329     if (size == 3) {
3330         TCGv_i64 o64 = tcg_temp_new_i64();
3331         TCGv_i64 n64 = tcg_temp_new_i64();
3332 
3333         t2 = load_reg(s, rt2);
3334 
3335         /*
3336          * For AArch32, architecturally the 32-bit word at the lowest
3337          * address is always Rt and the one at addr+4 is Rt2, even if
3338          * the CPU is big-endian. Since we're going to treat this as a
3339          * single 64-bit BE store, we need to put the two halves in the
3340          * opposite order for BE to LE, so that they end up in the right
3341          * places.  We don't want gen_aa32_st_i64, because that checks
3342          * SCTLR_B as if for an architectural 64-bit access.
3343          */
3344         if (s->be_data == MO_BE) {
3345             tcg_gen_concat_i32_i64(n64, t2, t1);
3346         } else {
3347             tcg_gen_concat_i32_i64(n64, t1, t2);
3348         }
3349 
3350         tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
3351                                    get_mem_index(s), opc);
3352 
3353         tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
3354         tcg_gen_extrl_i64_i32(t0, o64);
3355     } else {
3356         t2 = tcg_temp_new_i32();
3357         tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
3358         tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
3359         tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
3360     }
3361     tcg_gen_mov_i32(cpu_R[rd], t0);
3362     tcg_gen_br(done_label);
3363 
3364     gen_set_label(fail_label);
3365     tcg_gen_movi_i32(cpu_R[rd], 1);
3366     gen_set_label(done_label);
3367     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3368 }
3369 
3370 /* gen_srs:
3371  * @env: CPUARMState
3372  * @s: DisasContext
3373  * @mode: mode field from insn (which stack to store to)
3374  * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
3375  * @writeback: true if writeback bit set
3376  *
3377  * Generate code for the SRS (Store Return State) insn.
3378  */
3379 static void gen_srs(DisasContext *s,
3380                     uint32_t mode, uint32_t amode, bool writeback)
3381 {
3382     int32_t offset;
3383     TCGv_i32 addr, tmp;
3384     bool undef = false;
3385 
3386     /* SRS is:
3387      * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
3388      *   and specified mode is monitor mode
3389      * - UNDEFINED in Hyp mode
3390      * - UNPREDICTABLE in User or System mode
3391      * - UNPREDICTABLE if the specified mode is:
3392      * -- not implemented
3393      * -- not a valid mode number
3394      * -- a mode that's at a higher exception level
3395      * -- Monitor, if we are Non-secure
3396      * For the UNPREDICTABLE cases we choose to UNDEF.
3397      */
3398     if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
3399         gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
3400         return;
3401     }
3402 
3403     if (s->current_el == 0 || s->current_el == 2) {
3404         undef = true;
3405     }
3406 
3407     switch (mode) {
3408     case ARM_CPU_MODE_USR:
3409     case ARM_CPU_MODE_FIQ:
3410     case ARM_CPU_MODE_IRQ:
3411     case ARM_CPU_MODE_SVC:
3412     case ARM_CPU_MODE_ABT:
3413     case ARM_CPU_MODE_UND:
3414     case ARM_CPU_MODE_SYS:
3415         break;
3416     case ARM_CPU_MODE_HYP:
3417         if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3418             undef = true;
3419         }
3420         break;
3421     case ARM_CPU_MODE_MON:
3422         /* No need to check specifically for "are we non-secure" because
3423          * we've already made EL0 UNDEF and handled the trap for S-EL1;
3424          * so if this isn't EL3 then we must be non-secure.
3425          */
3426         if (s->current_el != 3) {
3427             undef = true;
3428         }
3429         break;
3430     default:
3431         undef = true;
3432     }
3433 
3434     if (undef) {
3435         unallocated_encoding(s);
3436         return;
3437     }
3438 
3439     addr = tcg_temp_new_i32();
3440     /* get_r13_banked() will raise an exception if called from System mode */
3441     gen_set_condexec(s);
3442     gen_update_pc(s, 0);
3443     gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
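    /*
     * SRS stores LR at the computed address and SPSR at address + 4.
     * The initial offset below selects the lower of those two words for
     * each addressing mode (DA: base - 4, IA: base, DB: base - 8,
     * IB: base + 4); the writeback offsets further down then leave the
     * banked SP at base - 8 for the decrement modes and base + 8 for
     * the increment modes.
     */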
3444     switch (amode) {
3445     case 0: /* DA */
3446         offset = -4;
3447         break;
3448     case 1: /* IA */
3449         offset = 0;
3450         break;
3451     case 2: /* DB */
3452         offset = -8;
3453         break;
3454     case 3: /* IB */
3455         offset = 4;
3456         break;
3457     default:
3458         g_assert_not_reached();
3459     }
3460     tcg_gen_addi_i32(addr, addr, offset);
3461     tmp = load_reg(s, 14);
3462     gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3463     tmp = load_cpu_field(spsr);
3464     tcg_gen_addi_i32(addr, addr, 4);
3465     gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3466     if (writeback) {
3467         switch (amode) {
3468         case 0:
3469             offset = -8;
3470             break;
3471         case 1:
3472             offset = 4;
3473             break;
3474         case 2:
3475             offset = -4;
3476             break;
3477         case 3:
3478             offset = 0;
3479             break;
3480         default:
3481             g_assert_not_reached();
3482         }
3483         tcg_gen_addi_i32(addr, addr, offset);
3484         gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
3485     }
3486     s->base.is_jmp = DISAS_UPDATE_EXIT;
3487 }
3488 
3489 /* Skip this instruction if the ARM condition is false */
3490 static void arm_skip_unless(DisasContext *s, uint32_t cond)
3491 {
3492     arm_gen_condlabel(s);
3493     arm_gen_test_cc(cond ^ 1, s->condlabel.label);
3494 }
3495 
3496 
3497 /*
3498  * Constant expanders used by T16/T32 decode
3499  */
3500 
3501 /* Return only the rotation part of T32ExpandImm.  */
3502 static int t32_expandimm_rot(DisasContext *s, int x)
3503 {
3504     return x & 0xc00 ? extract32(x, 7, 5) : 0;
3505 }
3506 
3507 /* Return the unrotated immediate from T32ExpandImm.  */
3508 static int t32_expandimm_imm(DisasContext *s, int x)
3509 {
3510     int imm = extract32(x, 0, 8);
3511 
3512     switch (extract32(x, 8, 4)) {
3513     case 0: /* XY */
3514         /* Nothing to do.  */
3515         break;
3516     case 1: /* 00XY00XY */
3517         imm *= 0x00010001;
3518         break;
3519     case 2: /* XY00XY00 */
3520         imm *= 0x01000100;
3521         break;
3522     case 3: /* XYXYXYXY */
3523         imm *= 0x01010101;
3524         break;
3525     default:
3526         /* Rotated constant.  */
3527         imm |= 0x80;
3528         break;
3529     }
3530     return imm;
3531 }
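/*
 * Worked examples of the split above, per the T32ExpandImm pseudocode:
 *   imm12 = 0x155: x[11:8] == 1, so the value is 0x55 * 0x00010001
 *                  = 0x00550055 and the rotation is 0.
 *   imm12 = 0x46f: x[11:10] != 0, so the unrotated value is
 *                  0x6f | 0x80 = 0xef and the rotation is x[11:7] == 8,
 *                  giving a final constant of 0xef000000.
 */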
3532 
3533 static int t32_branch24(DisasContext *s, int x)
3534 {
3535     /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S.  */
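    /*
     * x is the sign-extended S:J1:J2:imm10:imm11 field, so x < 0 exactly
     * when S == 1.  When S == 0 both J bits must be inverted, which is
     * what the conditional XOR with 3 << 21 below does.
     */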
3536     x ^= !(x < 0) * (3 << 21);
3537     /* Append the final zero.  */
3538     return x << 1;
3539 }
3540 
3541 static int t16_setflags(DisasContext *s)
3542 {
3543     return s->condexec_mask == 0;
3544 }
3545 
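/*
 * In the T16 PUSH/POP encodings, bit 8 of the immediate selects an extra
 * register beyond r0-r7: LR (r14) for PUSH and PC (r15) for POP.  These
 * expanders move that bit into the corresponding position of a full
 * 16-bit register list.
 */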
3546 static int t16_push_list(DisasContext *s, int x)
3547 {
3548     return (x & 0xff) | (x & 0x100) << (14 - 8);
3549 }
3550 
3551 static int t16_pop_list(DisasContext *s, int x)
3552 {
3553     return (x & 0xff) | (x & 0x100) << (15 - 8);
3554 }
3555 
3556 /*
3557  * Include the generated decoders.
3558  */
3559 
3560 #include "decode-a32.c.inc"
3561 #include "decode-a32-uncond.c.inc"
3562 #include "decode-t32.c.inc"
3563 #include "decode-t16.c.inc"
3564 
3565 static bool valid_cp(DisasContext *s, int cp)
3566 {
3567     /*
3568      * Return true if this coprocessor field indicates something
3569      * that's really a possible coprocessor.
3570      * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
3571      * and of those only cp14 and cp15 were used for registers.
3572      * cp10 and cp11 were used for VFP and Neon, whose decode is
3573      * dealt with elsewhere. With the advent of fp16, cp9 is also
3574      * now part of VFP.
3575      * For v8A and later, the encoding has been tightened so that
3576      * only cp14 and cp15 are valid, and other values aren't considered
3577      * to be in the coprocessor-instruction space at all. v8M still
3578      * permits coprocessors 0..7.
3579      * For XScale, we must not decode the XScale cp0, cp1 space as
3580      * a standard coprocessor insn, because we want to fall through to
3581      * the legacy disas_xscale_insn() decoder after decodetree is done.
3582      */
3583     if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
3584         return false;
3585     }
3586 
3587     if (arm_dc_feature(s, ARM_FEATURE_V8) &&
3588         !arm_dc_feature(s, ARM_FEATURE_M)) {
3589         return cp >= 14;
3590     }
3591     return cp < 8 || cp >= 14;
3592 }
3593 
3594 static bool trans_MCR(DisasContext *s, arg_MCR *a)
3595 {
3596     if (!valid_cp(s, a->cp)) {
3597         return false;
3598     }
3599     do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3600                    false, a->rt, 0);
3601     return true;
3602 }
3603 
3604 static bool trans_MRC(DisasContext *s, arg_MRC *a)
3605 {
3606     if (!valid_cp(s, a->cp)) {
3607         return false;
3608     }
3609     do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3610                    true, a->rt, 0);
3611     return true;
3612 }
3613 
3614 static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
3615 {
3616     if (!valid_cp(s, a->cp)) {
3617         return false;
3618     }
3619     do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3620                    false, a->rt, a->rt2);
3621     return true;
3622 }
3623 
3624 static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
3625 {
3626     if (!valid_cp(s, a->cp)) {
3627         return false;
3628     }
3629     do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3630                    true, a->rt, a->rt2);
3631     return true;
3632 }
3633 
3634 /* Helpers to swap operands for reverse-subtract.  */
3635 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3636 {
3637     tcg_gen_sub_i32(dst, b, a);
3638 }
3639 
3640 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3641 {
3642     gen_sub_CC(dst, b, a);
3643 }
3644 
3645 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3646 {
3647     gen_sub_carry(dest, b, a);
3648 }
3649 
3650 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3651 {
3652     gen_sbc_CC(dest, b, a);
3653 }
3654 
3655 /*
3656  * Helpers for the data processing routines.
3657  *
3658  * After the computation, store the results back.  This may be
3659  * suppressed altogether (STREG_NONE), require a runtime check against
3660  * the stack limits (STREG_SP_CHECK), generate an exception return
3661  * (STREG_EXC_RET), or simply store into a register (STREG_NORMAL).
3662  *
3663  * Always return true, indicating success for a trans_* function.
3664  */
3665 typedef enum {
3666    STREG_NONE,
3667    STREG_NORMAL,
3668    STREG_SP_CHECK,
3669    STREG_EXC_RET,
3670 } StoreRegKind;
3671 
3672 static bool store_reg_kind(DisasContext *s, int rd,
3673                             TCGv_i32 val, StoreRegKind kind)
3674 {
3675     switch (kind) {
3676     case STREG_NONE:
3677         return true;
3678     case STREG_NORMAL:
3679         /* See ALUWritePC: Interworking only from a32 mode. */
3680         if (s->thumb) {
3681             store_reg(s, rd, val);
3682         } else {
3683             store_reg_bx(s, rd, val);
3684         }
3685         return true;
3686     case STREG_SP_CHECK:
3687         store_sp_checked(s, val);
3688         return true;
3689     case STREG_EXC_RET:
3690         gen_exception_return(s, val);
3691         return true;
3692     }
3693     g_assert_not_reached();
3694 }
3695 
3696 /*
3697  * Data Processing (register)
3698  *
3699  * Operate, with set flags, one register source,
3700  * one immediate shifted register source, and a destination.
3701  */
3702 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
3703                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3704                          int logic_cc, StoreRegKind kind)
3705 {
3706     TCGv_i32 tmp1, tmp2;
3707 
3708     tmp2 = load_reg(s, a->rm);
3709     gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
3710     tmp1 = load_reg(s, a->rn);
3711 
3712     gen(tmp1, tmp1, tmp2);
3713 
3714     if (logic_cc) {
3715         gen_logic_CC(tmp1);
3716     }
3717     return store_reg_kind(s, a->rd, tmp1, kind);
3718 }
3719 
3720 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
3721                          void (*gen)(TCGv_i32, TCGv_i32),
3722                          int logic_cc, StoreRegKind kind)
3723 {
3724     TCGv_i32 tmp;
3725 
3726     tmp = load_reg(s, a->rm);
3727     gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
3728 
3729     gen(tmp, tmp);
3730     if (logic_cc) {
3731         gen_logic_CC(tmp);
3732     }
3733     return store_reg_kind(s, a->rd, tmp, kind);
3734 }
3735 
3736 /*
3737  * Data-processing (register-shifted register)
3738  *
3739  * Operate, with set flags, one register source,
3740  * one register shifted register source, and a destination.
3741  */
3742 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
3743                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3744                          int logic_cc, StoreRegKind kind)
3745 {
3746     TCGv_i32 tmp1, tmp2;
3747 
3748     tmp1 = load_reg(s, a->rs);
3749     tmp2 = load_reg(s, a->rm);
3750     gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3751     tmp1 = load_reg(s, a->rn);
3752 
3753     gen(tmp1, tmp1, tmp2);
3754 
3755     if (logic_cc) {
3756         gen_logic_CC(tmp1);
3757     }
3758     return store_reg_kind(s, a->rd, tmp1, kind);
3759 }
3760 
3761 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
3762                          void (*gen)(TCGv_i32, TCGv_i32),
3763                          int logic_cc, StoreRegKind kind)
3764 {
3765     TCGv_i32 tmp1, tmp2;
3766 
3767     tmp1 = load_reg(s, a->rs);
3768     tmp2 = load_reg(s, a->rm);
3769     gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3770 
3771     gen(tmp2, tmp2);
3772     if (logic_cc) {
3773         gen_logic_CC(tmp2);
3774     }
3775     return store_reg_kind(s, a->rd, tmp2, kind);
3776 }
3777 
3778 /*
3779  * Data-processing (immediate)
3780  *
3781  * Operate, with set flags, one register source,
3782  * one rotated immediate, and a destination.
3783  *
3784  * Note that because logic_cc && a->rot sets CF from the msb of the
3785  * rotated immediate, we must be passed the unrotated form of the
3786  * immediate so that the rotation and the CF update can be done here.
3787  */
3788 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
3789                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3790                          int logic_cc, StoreRegKind kind)
3791 {
3792     TCGv_i32 tmp1;
3793     uint32_t imm;
3794 
3795     imm = ror32(a->imm, a->rot);
3796     if (logic_cc && a->rot) {
3797         tcg_gen_movi_i32(cpu_CF, imm >> 31);
3798     }
3799     tmp1 = load_reg(s, a->rn);
3800 
3801     gen(tmp1, tmp1, tcg_constant_i32(imm));
3802 
3803     if (logic_cc) {
3804         gen_logic_CC(tmp1);
3805     }
3806     return store_reg_kind(s, a->rd, tmp1, kind);
3807 }
3808 
3809 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
3810                          void (*gen)(TCGv_i32, TCGv_i32),
3811                          int logic_cc, StoreRegKind kind)
3812 {
3813     TCGv_i32 tmp;
3814     uint32_t imm;
3815 
3816     imm = ror32(a->imm, a->rot);
3817     if (logic_cc && a->rot) {
3818         tcg_gen_movi_i32(cpu_CF, imm >> 31);
3819     }
3820 
3821     tmp = tcg_temp_new_i32();
3822     gen(tmp, tcg_constant_i32(imm));
3823 
3824     if (logic_cc) {
3825         gen_logic_CC(tmp);
3826     }
3827     return store_reg_kind(s, a->rd, tmp, kind);
3828 }
3829 
3830 #define DO_ANY3(NAME, OP, L, K)                                         \
3831     static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a)  \
3832     { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
3833     static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a)  \
3834     { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
3835     static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)   \
3836     { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
3837 
3838 #define DO_ANY2(NAME, OP, L, K)                                         \
3839     static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a)  \
3840     { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
3841     static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a)  \
3842     { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
3843     static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)   \
3844     { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
3845 
3846 #define DO_CMP2(NAME, OP, L)                                            \
3847     static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a)  \
3848     { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
3849     static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a)  \
3850     { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
3851     static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)   \
3852     { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
3853 
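/*
 * For example, DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL) expands
 * to trans_AND_rrri(), trans_AND_rrrr() and trans_AND_rri(), each of which
 * forwards to the matching op_s_* helper with tcg_gen_and_i32 as OP.
 */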
3854 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3855 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3856 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3857 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3858 
3859 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3860 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3861 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3862 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3863 
3864 DO_CMP2(TST, tcg_gen_and_i32, true)
3865 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
3866 DO_CMP2(CMN, gen_add_CC, false)
3867 DO_CMP2(CMP, gen_sub_CC, false)
3868 
3869 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
3870         a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
3871 
3872 /*
3873  * Note that in the computation of StoreRegKind we may return out of
3874  * the middle of the functions that are expanded by DO_ANY3, and that
3875  * we modify a->s via that parameter before it is used by OP.
3876  */
3877 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
3878         ({
3879             StoreRegKind ret = STREG_NORMAL;
3880             if (a->rd == 15 && a->s) {
3881                 /*
3882                  * See ALUExceptionReturn:
3883                  * In User mode, UNPREDICTABLE; we choose UNDEF.
3884                  * In Hyp mode, UNDEFINED.
3885                  */
3886                 if (IS_USER(s) || s->current_el == 2) {
3887                     unallocated_encoding(s);
3888                     return true;
3889                 }
3890                 /* There is no writeback of nzcv to PSTATE.  */
3891                 a->s = 0;
3892                 ret = STREG_EXC_RET;
3893             } else if (a->rd == 13 && a->rn == 13) {
3894                 ret = STREG_SP_CHECK;
3895             }
3896             ret;
3897         }))
3898 
3899 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3900         ({
3901             StoreRegKind ret = STREG_NORMAL;
3902             if (a->rd == 15 && a->s) {
3903                 /*
3904                  * See ALUExceptionReturn:
3905                  * In User mode, UNPREDICTABLE; we choose UNDEF.
3906                  * In Hyp mode, UNDEFINED.
3907                  */
3908                 if (IS_USER(s) || s->current_el == 2) {
3909                     unallocated_encoding(s);
3910                     return true;
3911                 }
3912                 /* There is no writeback of nzcv to PSTATE.  */
3913                 a->s = 0;
3914                 ret = STREG_EXC_RET;
3915             } else if (a->rd == 13) {
3916                 ret = STREG_SP_CHECK;
3917             }
3918             ret;
3919         }))
3920 
3921 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3922 
3923 /*
3924  * ORN is only available with T32, so there is no register-shifted-register
3925  * form of the insn.  Using the DO_ANY3 macro would create an unused function.
3926  */
3927 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
3928 {
3929     return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3930 }
3931 
3932 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
3933 {
3934     return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3935 }
3936 
3937 #undef DO_ANY3
3938 #undef DO_ANY2
3939 #undef DO_CMP2
3940 
3941 static bool trans_ADR(DisasContext *s, arg_ri *a)
3942 {
3943     store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
3944     return true;
3945 }
3946 
3947 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
3948 {
3949     if (!ENABLE_ARCH_6T2) {
3950         return false;
3951     }
3952 
3953     store_reg(s, a->rd, tcg_constant_i32(a->imm));
3954     return true;
3955 }
3956 
3957 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
3958 {
3959     TCGv_i32 tmp;
3960 
3961     if (!ENABLE_ARCH_6T2) {
3962         return false;
3963     }
3964 
3965     tmp = load_reg(s, a->rd);
3966     tcg_gen_ext16u_i32(tmp, tmp);
3967     tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
3968     store_reg(s, a->rd, tmp);
3969     return true;
3970 }
3971 
3972 /*
3973  * v8.1M MVE wide-shifts
3974  */
3975 static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
3976                           WideShiftImmFn *fn)
3977 {
3978     TCGv_i64 rda;
3979     TCGv_i32 rdalo, rdahi;
3980 
3981     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
3982         /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
3983         return false;
3984     }
3985     if (a->rdahi == 15) {
3986         /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
3987         return false;
3988     }
3989     if (!dc_isar_feature(aa32_mve, s) ||
3990         !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
3991         a->rdahi == 13) {
3992         /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
3993         unallocated_encoding(s);
3994         return true;
3995     }
3996 
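    /* In this encoding a shift count of 32 is represented as shim == 0. */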
3997     if (a->shim == 0) {
3998         a->shim = 32;
3999     }
4000 
4001     rda = tcg_temp_new_i64();
4002     rdalo = load_reg(s, a->rdalo);
4003     rdahi = load_reg(s, a->rdahi);
4004     tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4005 
4006     fn(rda, rda, a->shim);
4007 
4008     tcg_gen_extrl_i64_i32(rdalo, rda);
4009     tcg_gen_extrh_i64_i32(rdahi, rda);
4010     store_reg(s, a->rdalo, rdalo);
4011     store_reg(s, a->rdahi, rdahi);
4012 
4013     return true;
4014 }
4015 
4016 static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4017 {
4018     return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
4019 }
4020 
4021 static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4022 {
4023     return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
4024 }
4025 
4026 static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4027 {
4028     return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
4029 }
4030 
4031 static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4032 {
4033     gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
4034 }
4035 
4036 static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4037 {
4038     return do_mve_shl_ri(s, a, gen_mve_sqshll);
4039 }
4040 
4041 static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4042 {
4043     gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
4044 }
4045 
4046 static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4047 {
4048     return do_mve_shl_ri(s, a, gen_mve_uqshll);
4049 }
4050 
4051 static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4052 {
4053     return do_mve_shl_ri(s, a, gen_srshr64_i64);
4054 }
4055 
4056 static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4057 {
4058     return do_mve_shl_ri(s, a, gen_urshr64_i64);
4059 }
4060 
4061 static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
4062 {
4063     TCGv_i64 rda;
4064     TCGv_i32 rdalo, rdahi;
4065 
4066     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4067         /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4068         return false;
4069     }
4070     if (a->rdahi == 15) {
4071         /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
4072         return false;
4073     }
4074     if (!dc_isar_feature(aa32_mve, s) ||
4075         !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4076         a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
4077         a->rm == a->rdahi || a->rm == a->rdalo) {
4078         /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
4079         unallocated_encoding(s);
4080         return true;
4081     }
4082 
4083     rda = tcg_temp_new_i64();
4084     rdalo = load_reg(s, a->rdalo);
4085     rdahi = load_reg(s, a->rdahi);
4086     tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4087 
4088     /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4089     fn(rda, tcg_env, rda, cpu_R[a->rm]);
4090 
4091     tcg_gen_extrl_i64_i32(rdalo, rda);
4092     tcg_gen_extrh_i64_i32(rdahi, rda);
4093     store_reg(s, a->rdalo, rdalo);
4094     store_reg(s, a->rdahi, rdahi);
4095 
4096     return true;
4097 }
4098 
4099 static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
4100 {
4101     return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
4102 }
4103 
4104 static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
4105 {
4106     return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
4107 }
4108 
4109 static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4110 {
4111     return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
4112 }
4113 
4114 static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4115 {
4116     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
4117 }
4118 
4119 static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4120 {
4121     return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
4122 }
4123 
4124 static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4125 {
4126     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
4127 }
4128 
4129 static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
4130 {
4131     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4132         /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4133         return false;
4134     }
4135     if (!dc_isar_feature(aa32_mve, s) ||
4136         !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4137         a->rda == 13 || a->rda == 15) {
4138         /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
4139         unallocated_encoding(s);
4140         return true;
4141     }
4142 
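    /* As for the long shifts above, shim == 0 encodes a shift by 32. */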
4143     if (a->shim == 0) {
4144         a->shim = 32;
4145     }
4146     fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
4147 
4148     return true;
4149 }
4150 
4151 static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4152 {
4153     return do_mve_sh_ri(s, a, gen_urshr32_i32);
4154 }
4155 
4156 static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4157 {
4158     return do_mve_sh_ri(s, a, gen_srshr32_i32);
4159 }
4160 
4161 static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4162 {
4163     gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
4164 }
4165 
4166 static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4167 {
4168     return do_mve_sh_ri(s, a, gen_mve_sqshl);
4169 }
4170 
4171 static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4172 {
4173     gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
4174 }
4175 
4176 static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4177 {
4178     return do_mve_sh_ri(s, a, gen_mve_uqshl);
4179 }
4180 
4181 static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
4182 {
4183     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4184         /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4185         return false;
4186     }
4187     if (!dc_isar_feature(aa32_mve, s) ||
4188         !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4189         a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
4190         a->rm == a->rda) {
4191         /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
4192         unallocated_encoding(s);
4193         return true;
4194     }
4195 
4196     /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4197     fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
4198     return true;
4199 }
4200 
4201 static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
4202 {
4203     return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
4204 }
4205 
4206 static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
4207 {
4208     return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
4209 }
4210 
4211 /*
4212  * Multiply and multiply accumulate
4213  */
4214 
4215 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
4216 {
4217     TCGv_i32 t1, t2;
4218 
4219     t1 = load_reg(s, a->rn);
4220     t2 = load_reg(s, a->rm);
4221     tcg_gen_mul_i32(t1, t1, t2);
4222     if (add) {
4223         t2 = load_reg(s, a->ra);
4224         tcg_gen_add_i32(t1, t1, t2);
4225     }
4226     if (a->s) {
4227         gen_logic_CC(t1);
4228     }
4229     store_reg(s, a->rd, t1);
4230     return true;
4231 }
4232 
4233 static bool trans_MUL(DisasContext *s, arg_MUL *a)
4234 {
4235     return op_mla(s, a, false);
4236 }
4237 
4238 static bool trans_MLA(DisasContext *s, arg_MLA *a)
4239 {
4240     return op_mla(s, a, true);
4241 }
4242 
4243 static bool trans_MLS(DisasContext *s, arg_MLS *a)
4244 {
4245     TCGv_i32 t1, t2;
4246 
4247     if (!ENABLE_ARCH_6T2) {
4248         return false;
4249     }
4250     t1 = load_reg(s, a->rn);
4251     t2 = load_reg(s, a->rm);
4252     tcg_gen_mul_i32(t1, t1, t2);
4253     t2 = load_reg(s, a->ra);
4254     tcg_gen_sub_i32(t1, t2, t1);
4255     store_reg(s, a->rd, t1);
4256     return true;
4257 }
4258 
4259 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
4260 {
4261     TCGv_i32 t0, t1, t2, t3;
4262 
4263     t0 = load_reg(s, a->rm);
4264     t1 = load_reg(s, a->rn);
4265     if (uns) {
4266         tcg_gen_mulu2_i32(t0, t1, t0, t1);
4267     } else {
4268         tcg_gen_muls2_i32(t0, t1, t0, t1);
4269     }
4270     if (add) {
4271         t2 = load_reg(s, a->ra);
4272         t3 = load_reg(s, a->rd);
4273         tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
4274     }
4275     if (a->s) {
4276         gen_logicq_cc(t0, t1);
4277     }
4278     store_reg(s, a->ra, t0);
4279     store_reg(s, a->rd, t1);
4280     return true;
4281 }
4282 
4283 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
4284 {
4285     return op_mlal(s, a, true, false);
4286 }
4287 
4288 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
4289 {
4290     return op_mlal(s, a, false, false);
4291 }
4292 
4293 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
4294 {
4295     return op_mlal(s, a, true, true);
4296 }
4297 
4298 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
4299 {
4300     return op_mlal(s, a, false, true);
4301 }
4302 
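/* UMAAL: RdHi:RdLo = Rn * Rm + RdHi + RdLo; the sum cannot overflow 64 bits. */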
4303 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
4304 {
4305     TCGv_i32 t0, t1, t2, zero;
4306 
4307     if (s->thumb
4308         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4309         : !ENABLE_ARCH_6) {
4310         return false;
4311     }
4312 
4313     t0 = load_reg(s, a->rm);
4314     t1 = load_reg(s, a->rn);
4315     tcg_gen_mulu2_i32(t0, t1, t0, t1);
4316     zero = tcg_constant_i32(0);
4317     t2 = load_reg(s, a->ra);
4318     tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4319     t2 = load_reg(s, a->rd);
4320     tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4321     store_reg(s, a->ra, t0);
4322     store_reg(s, a->rd, t1);
4323     return true;
4324 }
4325 
4326 /*
4327  * Saturating addition and subtraction
4328  */
4329 
4330 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
4331 {
4332     TCGv_i32 t0, t1;
4333 
4334     if (s->thumb
4335         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4336         : !ENABLE_ARCH_5TE) {
4337         return false;
4338     }
4339 
4340     t0 = load_reg(s, a->rm);
4341     t1 = load_reg(s, a->rn);
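    /* QDADD/QDSUB (doub) saturate-double Rn before the saturating add/sub. */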
4342     if (doub) {
4343         gen_helper_add_saturate(t1, tcg_env, t1, t1);
4344     }
4345     if (add) {
4346         gen_helper_add_saturate(t0, tcg_env, t0, t1);
4347     } else {
4348         gen_helper_sub_saturate(t0, tcg_env, t0, t1);
4349     }
4350     store_reg(s, a->rd, t0);
4351     return true;
4352 }
4353 
4354 #define DO_QADDSUB(NAME, ADD, DOUB) \
4355 static bool trans_##NAME(DisasContext *s, arg_rrr *a)    \
4356 {                                                        \
4357     return op_qaddsub(s, a, ADD, DOUB);                  \
4358 }
4359 
4360 DO_QADDSUB(QADD, true, false)
4361 DO_QADDSUB(QSUB, false, false)
4362 DO_QADDSUB(QDADD, true, true)
4363 DO_QADDSUB(QDSUB, false, true)
4364 
4365 #undef DO_QADDSUB
4366 
4367 /*
4368  * Halfword multiply and multiply accumulate
4369  */
4370 
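/*
 * op_smlaxxx handles SMULxy (add_long == 0), SMLAxy (add_long == 1, with
 * Q-flag saturation on the accumulate) and SMLALxy (add_long == 2, 64-bit
 * accumulate); nt and mt select the top or bottom halfword of Rn and Rm.
 */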
4371 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
4372                        int add_long, bool nt, bool mt)
4373 {
4374     TCGv_i32 t0, t1, tl, th;
4375 
4376     if (s->thumb
4377         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4378         : !ENABLE_ARCH_5TE) {
4379         return false;
4380     }
4381 
4382     t0 = load_reg(s, a->rn);
4383     t1 = load_reg(s, a->rm);
4384     gen_mulxy(t0, t1, nt, mt);
4385 
4386     switch (add_long) {
4387     case 0:
4388         store_reg(s, a->rd, t0);
4389         break;
4390     case 1:
4391         t1 = load_reg(s, a->ra);
4392         gen_helper_add_setq(t0, tcg_env, t0, t1);
4393         store_reg(s, a->rd, t0);
4394         break;
4395     case 2:
4396         tl = load_reg(s, a->ra);
4397         th = load_reg(s, a->rd);
4398         /* Sign-extend the 32-bit product to 64 bits.  */
4399         t1 = tcg_temp_new_i32();
4400         tcg_gen_sari_i32(t1, t0, 31);
4401         tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
4402         store_reg(s, a->ra, tl);
4403         store_reg(s, a->rd, th);
4404         break;
4405     default:
4406         g_assert_not_reached();
4407     }
4408     return true;
4409 }
4410 
4411 #define DO_SMLAX(NAME, add, nt, mt) \
4412 static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
4413 {                                                          \
4414     return op_smlaxxx(s, a, add, nt, mt);                  \
4415 }
4416 
4417 DO_SMLAX(SMULBB, 0, 0, 0)
4418 DO_SMLAX(SMULBT, 0, 0, 1)
4419 DO_SMLAX(SMULTB, 0, 1, 0)
4420 DO_SMLAX(SMULTT, 0, 1, 1)
4421 
4422 DO_SMLAX(SMLABB, 1, 0, 0)
4423 DO_SMLAX(SMLABT, 1, 0, 1)
4424 DO_SMLAX(SMLATB, 1, 1, 0)
4425 DO_SMLAX(SMLATT, 1, 1, 1)
4426 
4427 DO_SMLAX(SMLALBB, 2, 0, 0)
4428 DO_SMLAX(SMLALBT, 2, 0, 1)
4429 DO_SMLAX(SMLALTB, 2, 1, 0)
4430 DO_SMLAX(SMLALTT, 2, 1, 1)
4431 
4432 #undef DO_SMLAX
4433 
4434 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
4435 {
4436     TCGv_i32 t0, t1;
4437 
4438     if (!ENABLE_ARCH_5TE) {
4439         return false;
4440     }
4441 
4442     t0 = load_reg(s, a->rn);
4443     t1 = load_reg(s, a->rm);
4444     /*
4445      * Since the nominal result is product<47:16>, shift the 16-bit
4446      * input up by 16 bits, so that the result is at product<63:32>.
4447      */
4448     if (mt) {
4449         tcg_gen_andi_i32(t1, t1, 0xffff0000);
4450     } else {
4451         tcg_gen_shli_i32(t1, t1, 16);
4452     }
4453     tcg_gen_muls2_i32(t0, t1, t0, t1);
4454     if (add) {
4455         t0 = load_reg(s, a->ra);
4456         gen_helper_add_setq(t1, tcg_env, t1, t0);
4457     }
4458     store_reg(s, a->rd, t1);
4459     return true;
4460 }
4461 
4462 #define DO_SMLAWX(NAME, add, mt) \
4463 static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
4464 {                                                          \
4465     return op_smlawx(s, a, add, mt);                       \
4466 }
4467 
4468 DO_SMLAWX(SMULWB, 0, 0)
4469 DO_SMLAWX(SMULWT, 0, 1)
4470 DO_SMLAWX(SMLAWB, 1, 0)
4471 DO_SMLAWX(SMLAWT, 1, 1)
4472 
4473 #undef DO_SMLAWX
4474 
4475 /*
4476  * MSR (immediate) and hints
4477  */
4478 
4479 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
4480 {
4481     /*
4482      * When running single-threaded TCG code, use the helper to ensure that
4483      * the next round-robin scheduled vCPU gets a crack.  When running in
4484      * MTTCG we don't generate jumps to the helper as it won't affect the
4485      * scheduling of other vCPUs.
4486      */
4487     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4488         gen_update_pc(s, curr_insn_len(s));
4489         s->base.is_jmp = DISAS_YIELD;
4490     }
4491     return true;
4492 }
4493 
4494 static bool trans_WFE(DisasContext *s, arg_WFE *a)
4495 {
4496     /*
4497      * When running single-threaded TCG code, use the helper to ensure that
4498      * the next round-robin scheduled vCPU gets a crack.  In MTTCG mode we
4499      * just skip this instruction.  Currently the SEV/SEVL instructions,
4500      * which are *one* of many ways to wake the CPU from WFE, are not
4501      * implemented so we can't sleep like WFI does.
4502      */
4503     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4504         gen_update_pc(s, curr_insn_len(s));
4505         s->base.is_jmp = DISAS_WFE;
4506     }
4507     return true;
4508 }
4509 
4510 static bool trans_WFI(DisasContext *s, arg_WFI *a)
4511 {
4512     /* For WFI, halt the vCPU until an IRQ. */
4513     gen_update_pc(s, curr_insn_len(s));
4514     s->base.is_jmp = DISAS_WFI;
4515     return true;
4516 }
4517 
4518 static bool trans_ESB(DisasContext *s, arg_ESB *a)
4519 {
4520     /*
4521      * For M-profile, minimal-RAS ESB can be a NOP.
4522      * Without RAS, we must implement this as NOP.
4523      */
4524     if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
4525         /*
4526          * QEMU does not have a source of physical SErrors,
4527          * so we are only concerned with virtual SErrors.
4528          * The pseudocode in the ARM for this case is
4529          *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
4530          *      AArch32.vESBOperation();
4531          * Most of the condition can be evaluated at translation time.
4532          * Test for EL2 present, and defer test for SEL2 to runtime.
4533          */
4534         if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
4535             gen_helper_vesb(tcg_env);
4536         }
4537     }
4538     return true;
4539 }
4540 
4541 static bool trans_NOP(DisasContext *s, arg_NOP *a)
4542 {
4543     return true;
4544 }
4545 
4546 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
4547 {
4548     uint32_t val = ror32(a->imm, a->rot * 2);
4549     uint32_t mask = msr_mask(s, a->mask, a->r);
4550 
4551     if (gen_set_psr_im(s, mask, a->r, val)) {
4552         unallocated_encoding(s);
4553     }
4554     return true;
4555 }
4556 
4557 /*
4558  * Cyclic Redundancy Check
4559  */
4560 
4561 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
4562 {
4563     TCGv_i32 t1, t2, t3;
4564 
4565     if (!dc_isar_feature(aa32_crc32, s)) {
4566         return false;
4567     }
4568 
4569     t1 = load_reg(s, a->rn);
4570     t2 = load_reg(s, a->rm);
4571     switch (sz) {
4572     case MO_8:
4573         gen_uxtb(t2);
4574         break;
4575     case MO_16:
4576         gen_uxth(t2);
4577         break;
4578     case MO_32:
4579         break;
4580     default:
4581         g_assert_not_reached();
4582     }
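    /*
     * The crc32/crc32c helpers take the operand width in bytes as their
     * third argument: 1 << MO_8, MO_16 or MO_32 is 1, 2 or 4 respectively.
     */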
4583     t3 = tcg_constant_i32(1 << sz);
4584     if (c) {
4585         gen_helper_crc32c(t1, t1, t2, t3);
4586     } else {
4587         gen_helper_crc32(t1, t1, t2, t3);
4588     }
4589     store_reg(s, a->rd, t1);
4590     return true;
4591 }
4592 
4593 #define DO_CRC32(NAME, c, sz) \
4594 static bool trans_##NAME(DisasContext *s, arg_rrr *a)  \
4595     { return op_crc32(s, a, c, sz); }
4596 
4597 DO_CRC32(CRC32B, false, MO_8)
4598 DO_CRC32(CRC32H, false, MO_16)
4599 DO_CRC32(CRC32W, false, MO_32)
4600 DO_CRC32(CRC32CB, true, MO_8)
4601 DO_CRC32(CRC32CH, true, MO_16)
4602 DO_CRC32(CRC32CW, true, MO_32)
4603 
4604 #undef DO_CRC32
4605 
4606 /*
4607  * Miscellaneous instructions
4608  */
4609 
4610 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
4611 {
4612     if (arm_dc_feature(s, ARM_FEATURE_M)) {
4613         return false;
4614     }
4615     gen_mrs_banked(s, a->r, a->sysm, a->rd);
4616     return true;
4617 }
4618 
4619 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
4620 {
4621     if (arm_dc_feature(s, ARM_FEATURE_M)) {
4622         return false;
4623     }
4624     gen_msr_banked(s, a->r, a->sysm, a->rn);
4625     return true;
4626 }
4627 
4628 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
4629 {
4630     TCGv_i32 tmp;
4631 
4632     if (arm_dc_feature(s, ARM_FEATURE_M)) {
4633         return false;
4634     }
4635     if (a->r) {
4636         if (IS_USER(s)) {
4637             unallocated_encoding(s);
4638             return true;
4639         }
4640         tmp = load_cpu_field(spsr);
4641     } else {
4642         tmp = tcg_temp_new_i32();
4643         gen_helper_cpsr_read(tmp, tcg_env);
4644     }
4645     store_reg(s, a->rd, tmp);
4646     return true;
4647 }
4648 
4649 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
4650 {
4651     TCGv_i32 tmp;
4652     uint32_t mask = msr_mask(s, a->mask, a->r);
4653 
4654     if (arm_dc_feature(s, ARM_FEATURE_M)) {
4655         return false;
4656     }
4657     tmp = load_reg(s, a->rn);
4658     if (gen_set_psr(s, mask, a->r, tmp)) {
4659         unallocated_encoding(s);
4660     }
4661     return true;
4662 }
4663 
4664 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
4665 {
4666     TCGv_i32 tmp;
4667 
4668     if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4669         return false;
4670     }
4671     tmp = tcg_temp_new_i32();
4672     gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
4673     store_reg(s, a->rd, tmp);
4674     return true;
4675 }
4676 
4677 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
4678 {
4679     TCGv_i32 addr, reg;
4680 
4681     if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4682         return false;
4683     }
4684     addr = tcg_constant_i32((a->mask << 10) | a->sysm);
4685     reg = load_reg(s, a->rn);
4686     gen_helper_v7m_msr(tcg_env, addr, reg);
4687     /* If we wrote to CONTROL, the EL might have changed */
4688     gen_rebuild_hflags(s, true);
4689     gen_lookup_tb(s);
4690     return true;
4691 }
4692 
4693 static bool trans_BX(DisasContext *s, arg_BX *a)
4694 {
4695     if (!ENABLE_ARCH_4T) {
4696         return false;
4697     }
4698     gen_bx_excret(s, load_reg(s, a->rm));
4699     return true;
4700 }
4701 
4702 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
4703 {
4704     if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
4705         return false;
4706     }
4707     /*
4708      * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a
4709      * TBFLAGS bit on a basically-never-happens case, so call a helper
4710      * function to check for the trap and raise the exception if needed
4711      * (passing it the register number for the syndrome value).
4712      * v8A doesn't have this HSTR bit.
4713      */
4714     if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4715         arm_dc_feature(s, ARM_FEATURE_EL2) &&
4716         s->current_el < 2 && s->ns) {
4717         gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
4718     }
4719     /* Trivial implementation equivalent to bx.  */
4720     gen_bx(s, load_reg(s, a->rm));
4721     return true;
4722 }
4723 
4724 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
4725 {
4726     TCGv_i32 tmp;
4727 
4728     if (!ENABLE_ARCH_5) {
4729         return false;
4730     }
4731     tmp = load_reg(s, a->rm);
4732     gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
4733     gen_bx(s, tmp);
4734     return true;
4735 }
4736 
4737 /*
4738  * BXNS/BLXNS: only exist for v8M with the security extensions,
4739  * and always UNDEF if NonSecure.  We don't implement these in
4740  * the user-only mode either (in theory you can use them from
4741  * Secure User mode but they are too tied in to system emulation).
4742  */
4743 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
4744 {
4745     if (!s->v8m_secure || IS_USER_ONLY) {
4746         unallocated_encoding(s);
4747     } else {
4748         gen_bxns(s, a->rm);
4749     }
4750     return true;
4751 }
4752 
4753 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
4754 {
4755     if (!s->v8m_secure || IS_USER_ONLY) {
4756         unallocated_encoding(s);
4757     } else {
4758         gen_blxns(s, a->rm);
4759     }
4760     return true;
4761 }
4762 
4763 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
4764 {
4765     TCGv_i32 tmp;
4766 
4767     if (!ENABLE_ARCH_5) {
4768         return false;
4769     }
4770     tmp = load_reg(s, a->rm);
4771     tcg_gen_clzi_i32(tmp, tmp, 32);
4772     store_reg(s, a->rd, tmp);
4773     return true;
4774 }
4775 
4776 static bool trans_ERET(DisasContext *s, arg_ERET *a)
4777 {
4778     TCGv_i32 tmp;
4779 
4780     if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
4781         return false;
4782     }
4783     if (IS_USER(s)) {
4784         unallocated_encoding(s);
4785         return true;
4786     }
4787     if (s->current_el == 2) {
4788         /* ERET from Hyp uses ELR_Hyp, not LR */
4789         tmp = load_cpu_field_low32(elr_el[2]);
4790     } else {
4791         tmp = load_reg(s, 14);
4792     }
4793     gen_exception_return(s, tmp);
4794     return true;
4795 }
4796 
4797 static bool trans_HLT(DisasContext *s, arg_HLT *a)
4798 {
4799     gen_hlt(s, a->imm);
4800     return true;
4801 }
4802 
4803 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
4804 {
4805     if (!ENABLE_ARCH_5) {
4806         return false;
4807     }
4808     /* BKPT is OK with ECI set and leaves it untouched */
4809     s->eci_handled = true;
4810     if (arm_dc_feature(s, ARM_FEATURE_M) &&
4811         semihosting_enabled(s->current_el == 0) &&
4812         (a->imm == 0xab)) {
4813         gen_exception_internal_insn(s, EXCP_SEMIHOST);
4814     } else {
4815         gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
4816     }
4817     return true;
4818 }
4819 
4820 static bool trans_HVC(DisasContext *s, arg_HVC *a)
4821 {
4822     if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
4823         return false;
4824     }
4825     if (IS_USER(s)) {
4826         unallocated_encoding(s);
4827     } else {
4828         gen_hvc(s, a->imm);
4829     }
4830     return true;
4831 }
4832 
4833 static bool trans_SMC(DisasContext *s, arg_SMC *a)
4834 {
4835     if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
4836         return false;
4837     }
4838     if (IS_USER(s)) {
4839         unallocated_encoding(s);
4840     } else {
4841         gen_smc(s);
4842     }
4843     return true;
4844 }
4845 
4846 static bool trans_SG(DisasContext *s, arg_SG *a)
4847 {
4848     if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4849         !arm_dc_feature(s, ARM_FEATURE_V8)) {
4850         return false;
4851     }
4852     /*
4853      * SG (v8M only)
4854      * The bulk of the behaviour for this instruction is implemented
4855      * in v7m_handle_execute_nsc(), which deals with the insn when
4856      * it is executed by a CPU in non-secure state from memory
4857      * which is Secure & NonSecure-Callable.
4858      * Here we only need to handle the remaining cases:
4859      *  * in NS memory (including the "security extension not
4860      *    implemented" case) : NOP
4861      *  * in S memory but CPU already secure (clear IT bits)
4862      * We know that the attribute for the memory this insn is
4863      * in must match the current CPU state, because otherwise
4864      * get_phys_addr_pmsav8 would have generated an exception.
4865      */
4866     if (s->v8m_secure) {
4867         /* Like the IT insn, we don't need to generate any code */
4868         s->condexec_cond = 0;
4869         s->condexec_mask = 0;
4870     }
4871     return true;
4872 }
4873 
4874 static bool trans_TT(DisasContext *s, arg_TT *a)
4875 {
4876     TCGv_i32 addr, tmp;
4877 
4878     if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4879         !arm_dc_feature(s, ARM_FEATURE_V8)) {
4880         return false;
4881     }
4882     if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
4883         /* We UNDEF for these UNPREDICTABLE cases */
4884         unallocated_encoding(s);
4885         return true;
4886     }
4887     if (a->A && !s->v8m_secure) {
4888         /* This case is UNDEFINED.  */
4889         unallocated_encoding(s);
4890         return true;
4891     }
4892 
4893     addr = load_reg(s, a->rn);
4894     tmp = tcg_temp_new_i32();
4895     gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
4896     store_reg(s, a->rd, tmp);
4897     return true;
4898 }
4899 
4900 /*
4901  * Load/store register index
4902  */
4903 
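/*
 * Build the ISS (instruction specific syndrome) information that
 * disas_set_da_iss() can report for a data abort on this access: the Rt
 * number, plus a flag for 16-bit encodings, or ISSInvalid when the access
 * uses writeback and so cannot be described by a syndrome.
 */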
4904 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
4905 {
4906     ISSInfo ret;
4907 
4908     /* ISS not valid if writeback */
4909     if (p && !w) {
4910         ret = rd;
4911         if (curr_insn_len(s) == 2) {
4912             ret |= ISSIs16Bit;
4913         }
4914     } else {
4915         ret = ISSInvalid;
4916     }
4917     return ret;
4918 }
4919 
4920 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
4921 {
4922     TCGv_i32 addr = load_reg(s, a->rn);
4923 
4924     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
4925         gen_helper_v8m_stackcheck(tcg_env, addr);
4926     }
4927 
4928     if (a->p) {
4929         TCGv_i32 ofs = load_reg(s, a->rm);
4930         gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4931         if (a->u) {
4932             tcg_gen_add_i32(addr, addr, ofs);
4933         } else {
4934             tcg_gen_sub_i32(addr, addr, ofs);
4935         }
4936     }
4937     return addr;
4938 }
4939 
4940 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
4941                             TCGv_i32 addr, int address_offset)
4942 {
4943     if (!a->p) {
4944         TCGv_i32 ofs = load_reg(s, a->rm);
4945         gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4946         if (a->u) {
4947             tcg_gen_add_i32(addr, addr, ofs);
4948         } else {
4949             tcg_gen_sub_i32(addr, addr, ofs);
4950         }
4951     } else if (!a->w) {
4952         return;
4953     }
4954     tcg_gen_addi_i32(addr, addr, address_offset);
4955     store_reg(s, a->rn, addr);
4956 }
4957 
4958 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
4959                        MemOp mop, int mem_idx)
4960 {
4961     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
4962     TCGv_i32 addr, tmp;
4963 
4964     addr = op_addr_rr_pre(s, a);
4965 
4966     tmp = tcg_temp_new_i32();
4967     gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
4968     disas_set_da_iss(s, mop, issinfo);
4969 
4970     /*
4971      * Perform base writeback before the loaded value to
4972      * ensure correct behavior with overlapping index registers.
4973      */
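    /*
     * In particular, if rt == rn for a writeback load, the register ends up
     * holding the loaded data rather than the written-back address, because
     * store_reg_from_load() runs after op_addr_rr_post().
     */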
4974     op_addr_rr_post(s, a, addr, 0);
4975     store_reg_from_load(s, a->rt, tmp);
4976     return true;
4977 }
4978 
4979 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
4980                         MemOp mop, int mem_idx)
4981 {
4982     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
4983     TCGv_i32 addr, tmp;
4984 
4985     /*
4986      * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
4987      * is either UNPREDICTABLE or has defined behaviour
4988      */
4989     if (s->thumb && a->rn == 15) {
4990         return false;
4991     }
4992 
4993     addr = op_addr_rr_pre(s, a);
4994 
4995     tmp = load_reg(s, a->rt);
4996     gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
4997     disas_set_da_iss(s, mop, issinfo);
4998 
4999     op_addr_rr_post(s, a, addr, 0);
5000     return true;
5001 }
5002 
5003 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
5004 {
5005     int mem_idx = get_mem_index(s);
5006     TCGv_i32 addr, tmp;
5007 
5008     if (!ENABLE_ARCH_5TE) {
5009         return false;
5010     }
5011     if (a->rt & 1) {
5012         unallocated_encoding(s);
5013         return true;
5014     }
5015     addr = op_addr_rr_pre(s, a);
5016 
5017     tmp = tcg_temp_new_i32();
5018     gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5019     store_reg(s, a->rt, tmp);
5020 
5021     tcg_gen_addi_i32(addr, addr, 4);
5022 
5023     tmp = tcg_temp_new_i32();
5024     gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5025     store_reg(s, a->rt + 1, tmp);
5026 
5027     /* LDRD w/ base writeback is undefined if the registers overlap.  */
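    /*
     * addr was advanced by 4 to reach the second word, so pass -4 here so
     * that the written-back value is the intended base address.
     */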
5028     op_addr_rr_post(s, a, addr, -4);
5029     return true;
5030 }
5031 
5032 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
5033 {
5034     int mem_idx = get_mem_index(s);
5035     TCGv_i32 addr, tmp;
5036 
5037     if (!ENABLE_ARCH_5TE) {
5038         return false;
5039     }
5040     if (a->rt & 1) {
5041         unallocated_encoding(s);
5042         return true;
5043     }
5044     addr = op_addr_rr_pre(s, a);
5045 
5046     tmp = load_reg(s, a->rt);
5047     gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5048 
5049     tcg_gen_addi_i32(addr, addr, 4);
5050 
5051     tmp = load_reg(s, a->rt + 1);
5052     gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5053 
5054     op_addr_rr_post(s, a, addr, -4);
5055     return true;
5056 }
5057 
5058 /*
5059  * Load/store immediate index
5060  */
5061 
5062 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
5063 {
5064     int ofs = a->imm;
5065 
5066     if (!a->u) {
5067         ofs = -ofs;
5068     }
5069 
5070     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
5071         /*
5072          * Stackcheck. Here we know 'addr' is the current SP;
5073          * U is set if we're moving SP up, else down. It is
5074          * UNKNOWN whether the limit check triggers when SP starts
5075          * below the limit and ends up above it; we chose to do so.
5076          */
5077         if (!a->u) {
5078             TCGv_i32 newsp = tcg_temp_new_i32();
5079             tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
5080             gen_helper_v8m_stackcheck(tcg_env, newsp);
5081         } else {
5082             gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
5083         }
5084     }
5085 
5086     return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
5087 }
5088 
5089 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
5090                             TCGv_i32 addr, int address_offset)
5091 {
5092     if (!a->p) {
5093         if (a->u) {
5094             address_offset += a->imm;
5095         } else {
5096             address_offset -= a->imm;
5097         }
5098     } else if (!a->w) {
5099         return;
5100     }
5101     tcg_gen_addi_i32(addr, addr, address_offset);
5102     store_reg(s, a->rn, addr);
5103 }
5104 
5105 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
5106                        MemOp mop, int mem_idx)
5107 {
5108     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
5109     TCGv_i32 addr, tmp;
5110 
5111     addr = op_addr_ri_pre(s, a);
5112 
5113     tmp = tcg_temp_new_i32();
5114     gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
5115     disas_set_da_iss(s, mop, issinfo);
5116 
5117     /*
5118      * Perform base writeback before the loaded value to
5119      * ensure correct behavior with overlapping index registers.
5120      */
5121     op_addr_ri_post(s, a, addr, 0);
5122     store_reg_from_load(s, a->rt, tmp);
5123     return true;
5124 }
5125 
5126 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
5127                         MemOp mop, int mem_idx)
5128 {
5129     ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
5130     TCGv_i32 addr, tmp;
5131 
5132     /*
5133      * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
5134      * is either UNPREDICTABLE or has defined behaviour
5135      */
5136     if (s->thumb && a->rn == 15) {
5137         return false;
5138     }
5139 
5140     addr = op_addr_ri_pre(s, a);
5141 
5142     tmp = load_reg(s, a->rt);
5143     gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5144     disas_set_da_iss(s, mop, issinfo);
5145 
5146     op_addr_ri_post(s, a, addr, 0);
5147     return true;
5148 }
5149 
5150 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5151 {
5152     int mem_idx = get_mem_index(s);
5153     TCGv_i32 addr, tmp;
5154 
5155     addr = op_addr_ri_pre(s, a);
5156 
5157     tmp = tcg_temp_new_i32();
5158     gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5159     store_reg(s, a->rt, tmp);
5160 
5161     tcg_gen_addi_i32(addr, addr, 4);
5162 
5163     tmp = tcg_temp_new_i32();
5164     gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5165     store_reg(s, rt2, tmp);
5166 
5167     /* LDRD w/ base writeback is undefined if the registers overlap.  */
5168     op_addr_ri_post(s, a, addr, -4);
5169     return true;
5170 }
5171 
5172 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5173 {
5174     if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5175         return false;
5176     }
5177     return op_ldrd_ri(s, a, a->rt + 1);
5178 }
5179 
5180 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5181 {
5182     arg_ldst_ri b = {
5183         .u = a->u, .w = a->w, .p = a->p,
5184         .rn = a->rn, .rt = a->rt, .imm = a->imm
5185     };
5186     return op_ldrd_ri(s, &b, a->rt2);
5187 }
5188 
5189 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5190 {
5191     int mem_idx = get_mem_index(s);
5192     TCGv_i32 addr, tmp;
5193 
5194     addr = op_addr_ri_pre(s, a);
5195 
5196     tmp = load_reg(s, a->rt);
5197     gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5198 
5199     tcg_gen_addi_i32(addr, addr, 4);
5200 
5201     tmp = load_reg(s, rt2);
5202     gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5203 
5204     op_addr_ri_post(s, a, addr, -4);
5205     return true;
5206 }
5207 
5208 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5209 {
5210     if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5211         return false;
5212     }
5213     return op_strd_ri(s, a, a->rt + 1);
5214 }
5215 
5216 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5217 {
5218     arg_ldst_ri b = {
5219         .u = a->u, .w = a->w, .p = a->p,
5220         .rn = a->rn, .rt = a->rt, .imm = a->imm
5221     };
5222     return op_strd_ri(s, &b, a->rt2);
5223 }
5224 
5225 #define DO_LDST(NAME, WHICH, MEMOP) \
5226 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a)        \
5227 {                                                                     \
5228     return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s));            \
5229 }                                                                     \
5230 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a)       \
5231 {                                                                     \
5232     return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s));   \
5233 }                                                                     \
5234 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a)        \
5235 {                                                                     \
5236     return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s));            \
5237 }                                                                     \
5238 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a)       \
5239 {                                                                     \
5240     return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s));   \
5241 }
5242 
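/*
 * Each DO_LDST() below generates the register-offset and immediate-offset
 * forms of one load/store plus its unprivileged T variant (LDRT, STRT, ...),
 * which differs only in using get_a32_user_mem_index() so that the access
 * is performed with user-mode permissions.
 */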
5243 DO_LDST(LDR, load, MO_UL)
5244 DO_LDST(LDRB, load, MO_UB)
5245 DO_LDST(LDRH, load, MO_UW)
5246 DO_LDST(LDRSB, load, MO_SB)
5247 DO_LDST(LDRSH, load, MO_SW)
5248 
5249 DO_LDST(STR, store, MO_UL)
5250 DO_LDST(STRB, store, MO_UB)
5251 DO_LDST(STRH, store, MO_UW)
5252 
5253 #undef DO_LDST
5254 
5255 /*
5256  * Synchronization primitives
5257  */
5258 
5259 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
5260 {
5261     TCGv_i32 addr, tmp;
5262     TCGv taddr;
5263 
5264     opc |= s->be_data;
5265     addr = load_reg(s, a->rn);
5266     taddr = gen_aa32_addr(s, addr, opc);
5267 
5268     tmp = load_reg(s, a->rt2);
5269     tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
5270 
5271     store_reg(s, a->rt, tmp);
5272     return true;
5273 }
5274 
5275 static bool trans_SWP(DisasContext *s, arg_SWP *a)
5276 {
5277     return op_swp(s, a, MO_UL | MO_ALIGN);
5278 }
5279 
5280 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
5281 {
5282     return op_swp(s, a, MO_UB);
5283 }
5284 
5285 /*
5286  * Load/Store Exclusive and Load-Acquire/Store-Release
5287  */
5288 
5289 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
5290 {
5291     TCGv_i32 addr;
5292     /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5293     bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5294 
5295     /* We UNDEF for these UNPREDICTABLE cases.  */
5296     if (a->rd == 15 || a->rn == 15 || a->rt == 15
5297         || a->rd == a->rn || a->rd == a->rt
5298         || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
5299         || (mop == MO_64
5300             && (a->rt2 == 15
5301                 || a->rd == a->rt2
5302                 || (!v8a && s->thumb && a->rt2 == 13)))) {
5303         unallocated_encoding(s);
5304         return true;
5305     }
5306 
5307     if (rel) {
5308         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5309     }
5310 
5311     addr = tcg_temp_new_i32();
5312     load_reg_var(s, addr, a->rn);
5313     tcg_gen_addi_i32(addr, addr, a->imm);
5314 
5315     gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
5316     return true;
5317 }
5318 
5319 static bool trans_STREX(DisasContext *s, arg_STREX *a)
5320 {
5321     if (!ENABLE_ARCH_6) {
5322         return false;
5323     }
5324     return op_strex(s, a, MO_32, false);
5325 }
5326 
5327 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
5328 {
5329     if (!ENABLE_ARCH_6K) {
5330         return false;
5331     }
5332     /* We UNDEF for these UNPREDICTABLE cases.  */
5333     if (a->rt & 1) {
5334         unallocated_encoding(s);
5335         return true;
5336     }
5337     a->rt2 = a->rt + 1;
5338     return op_strex(s, a, MO_64, false);
5339 }
5340 
5341 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
5342 {
5343     return op_strex(s, a, MO_64, false);
5344 }
5345 
5346 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
5347 {
5348     if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5349         return false;
5350     }
5351     return op_strex(s, a, MO_8, false);
5352 }
5353 
5354 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
5355 {
5356     if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5357         return false;
5358     }
5359     return op_strex(s, a, MO_16, false);
5360 }
5361 
5362 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
5363 {
5364     if (!ENABLE_ARCH_8) {
5365         return false;
5366     }
5367     return op_strex(s, a, MO_32, true);
5368 }
5369 
5370 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
5371 {
5372     if (!ENABLE_ARCH_8) {
5373         return false;
5374     }
5375     /* We UNDEF for these UNPREDICTABLE cases.  */
5376     if (a->rt & 1) {
5377         unallocated_encoding(s);
5378         return true;
5379     }
5380     a->rt2 = a->rt + 1;
5381     return op_strex(s, a, MO_64, true);
5382 }
5383 
5384 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
5385 {
5386     if (!ENABLE_ARCH_8) {
5387         return false;
5388     }
5389     return op_strex(s, a, MO_64, true);
5390 }
5391 
5392 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
5393 {
5394     if (!ENABLE_ARCH_8) {
5395         return false;
5396     }
5397     return op_strex(s, a, MO_8, true);
5398 }
5399 
5400 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
5401 {
5402     if (!ENABLE_ARCH_8) {
5403         return false;
5404     }
5405     return op_strex(s, a, MO_16, true);
5406 }
5407 
5408 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
5409 {
5410     TCGv_i32 addr, tmp;
5411 
5412     if (!ENABLE_ARCH_8) {
5413         return false;
5414     }
5415     /* We UNDEF for these UNPREDICTABLE cases.  */
5416     if (a->rn == 15 || a->rt == 15) {
5417         unallocated_encoding(s);
5418         return true;
5419     }
5420 
5421     addr = load_reg(s, a->rn);
5422     tmp = load_reg(s, a->rt);
5423     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5424     gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5425     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
5426 
5427     return true;
5428 }
5429 
5430 static bool trans_STL(DisasContext *s, arg_STL *a)
5431 {
5432     return op_stl(s, a, MO_UL);
5433 }
5434 
5435 static bool trans_STLB(DisasContext *s, arg_STL *a)
5436 {
5437     return op_stl(s, a, MO_UB);
5438 }
5439 
5440 static bool trans_STLH(DisasContext *s, arg_STL *a)
5441 {
5442     return op_stl(s, a, MO_UW);
5443 }
5444 
5445 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
5446 {
5447     TCGv_i32 addr;
5448     /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5449     bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5450 
5451     /* We UNDEF for these UNPREDICTABLE cases.  */
5452     if (a->rn == 15 || a->rt == 15
5453         || (!v8a && s->thumb && a->rt == 13)
5454         || (mop == MO_64
5455             && (a->rt2 == 15 || a->rt == a->rt2
5456                 || (!v8a && s->thumb && a->rt2 == 13)))) {
5457         unallocated_encoding(s);
5458         return true;
5459     }
5460 
5461     addr = tcg_temp_new_i32();
5462     load_reg_var(s, addr, a->rn);
5463     tcg_gen_addi_i32(addr, addr, a->imm);
5464 
5465     gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
5466 
5467     if (acq) {
5468         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5469     }
5470     return true;
5471 }
5472 
5473 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
5474 {
5475     if (!ENABLE_ARCH_6) {
5476         return false;
5477     }
5478     return op_ldrex(s, a, MO_32, false);
5479 }
5480 
5481 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
5482 {
5483     if (!ENABLE_ARCH_6K) {
5484         return false;
5485     }
5486     /* We UNDEF for these UNPREDICTABLE cases.  */
5487     if (a->rt & 1) {
5488         unallocated_encoding(s);
5489         return true;
5490     }
5491     a->rt2 = a->rt + 1;
5492     return op_ldrex(s, a, MO_64, false);
5493 }
5494 
5495 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
5496 {
5497     return op_ldrex(s, a, MO_64, false);
5498 }
5499 
5500 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
5501 {
5502     if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5503         return false;
5504     }
5505     return op_ldrex(s, a, MO_8, false);
5506 }
5507 
5508 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
5509 {
5510     if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5511         return false;
5512     }
5513     return op_ldrex(s, a, MO_16, false);
5514 }
5515 
5516 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
5517 {
5518     if (!ENABLE_ARCH_8) {
5519         return false;
5520     }
5521     return op_ldrex(s, a, MO_32, true);
5522 }
5523 
5524 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
5525 {
5526     if (!ENABLE_ARCH_8) {
5527         return false;
5528     }
5529     /* We UNDEF for these UNPREDICTABLE cases.  */
5530     if (a->rt & 1) {
5531         unallocated_encoding(s);
5532         return true;
5533     }
5534     a->rt2 = a->rt + 1;
5535     return op_ldrex(s, a, MO_64, true);
5536 }
5537 
5538 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
5539 {
5540     if (!ENABLE_ARCH_8) {
5541         return false;
5542     }
5543     return op_ldrex(s, a, MO_64, true);
5544 }
5545 
5546 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
5547 {
5548     if (!ENABLE_ARCH_8) {
5549         return false;
5550     }
5551     return op_ldrex(s, a, MO_8, true);
5552 }
5553 
5554 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
5555 {
5556     if (!ENABLE_ARCH_8) {
5557         return false;
5558     }
5559     return op_ldrex(s, a, MO_16, true);
5560 }
5561 
5562 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
5563 {
5564     TCGv_i32 addr, tmp;
5565 
5566     if (!ENABLE_ARCH_8) {
5567         return false;
5568     }
5569     /* We UNDEF for these UNPREDICTABLE cases.  */
5570     if (a->rn == 15 || a->rt == 15) {
5571         unallocated_encoding(s);
5572         return true;
5573     }
5574 
5575     addr = load_reg(s, a->rn);
5576     tmp = tcg_temp_new_i32();
5577     gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5578     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
5579 
5580     store_reg(s, a->rt, tmp);
5581     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5582     return true;
5583 }
5584 
5585 static bool trans_LDA(DisasContext *s, arg_LDA *a)
5586 {
5587     return op_lda(s, a, MO_UL);
5588 }
5589 
5590 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
5591 {
5592     return op_lda(s, a, MO_UB);
5593 }
5594 
5595 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
5596 {
5597     return op_lda(s, a, MO_UW);
5598 }
5599 
5600 /*
5601  * Media instructions
5602  */
5603 
5604 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
5605 {
5606     TCGv_i32 t1, t2;
5607 
5608     if (!ENABLE_ARCH_6) {
5609         return false;
5610     }
5611 
5612     t1 = load_reg(s, a->rn);
5613     t2 = load_reg(s, a->rm);
5614     gen_helper_usad8(t1, t1, t2);
5615     if (a->ra != 15) {
5616         t2 = load_reg(s, a->ra);
5617         tcg_gen_add_i32(t1, t1, t2);
5618     }
5619     store_reg(s, a->rd, t1);
5620     return true;
5621 }
5622 
5623 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
5624 {
5625     TCGv_i32 tmp;
5626     int width = a->widthm1 + 1;
5627     int shift = a->lsb;
5628 
5629     if (!ENABLE_ARCH_6T2) {
5630         return false;
5631     }
5632     if (shift + width > 32) {
5633         /* UNPREDICTABLE; we choose to UNDEF */
5634         unallocated_encoding(s);
5635         return true;
5636     }
5637 
5638     tmp = load_reg(s, a->rn);
5639     if (u) {
5640         tcg_gen_extract_i32(tmp, tmp, shift, width);
5641     } else {
5642         tcg_gen_sextract_i32(tmp, tmp, shift, width);
5643     }
5644     store_reg(s, a->rd, tmp);
5645     return true;
5646 }
5647 
5648 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
5649 {
5650     return op_bfx(s, a, false);
5651 }
5652 
5653 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
5654 {
5655     return op_bfx(s, a, true);
5656 }
5657 
5658 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
5659 {
5660     int msb = a->msb, lsb = a->lsb;
5661     TCGv_i32 t_in, t_rd;
5662     int width;
5663 
5664     if (!ENABLE_ARCH_6T2) {
5665         return false;
5666     }
5667     if (msb < lsb) {
5668         /* UNPREDICTABLE; we choose to UNDEF */
5669         unallocated_encoding(s);
5670         return true;
5671     }
5672 
5673     width = msb + 1 - lsb;
5674     if (a->rn == 15) {
5675         /* BFC */
5676         t_in = tcg_constant_i32(0);
5677     } else {
5678         /* BFI */
5679         t_in = load_reg(s, a->rn);
5680     }
5681     t_rd = load_reg(s, a->rd);
5682     tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
5683     store_reg(s, a->rd, t_rd);
5684     return true;
5685 }
5686 
5687 static bool trans_UDF(DisasContext *s, arg_UDF *a)
5688 {
5689     unallocated_encoding(s);
5690     return true;
5691 }
5692 
5693 /*
5694  * Parallel addition and subtraction
5695  */
5696 
5697 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
5698                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
5699 {
5700     TCGv_i32 t0, t1;
5701 
5702     if (s->thumb
5703         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5704         : !ENABLE_ARCH_6) {
5705         return false;
5706     }
5707 
5708     t0 = load_reg(s, a->rn);
5709     t1 = load_reg(s, a->rm);
5710 
5711     gen(t0, t0, t1);
5712 
5713     store_reg(s, a->rd, t0);
5714     return true;
5715 }
5716 
5717 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
5718                              void (*gen)(TCGv_i32, TCGv_i32,
5719                                          TCGv_i32, TCGv_ptr))
5720 {
5721     TCGv_i32 t0, t1;
5722     TCGv_ptr ge;
5723 
5724     if (s->thumb
5725         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5726         : !ENABLE_ARCH_6) {
5727         return false;
5728     }
5729 
5730     t0 = load_reg(s, a->rn);
5731     t1 = load_reg(s, a->rm);
5732 
5733     ge = tcg_temp_new_ptr();
5734     tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
5735     gen(t0, t0, t1, ge);
5736 
5737     store_reg(s, a->rd, t0);
5738     return true;
5739 }
5740 
5741 #define DO_PAR_ADDSUB(NAME, helper) \
5742 static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
5743 {                                                       \
5744     return op_par_addsub(s, a, helper);                 \
5745 }
5746 
5747 #define DO_PAR_ADDSUB_GE(NAME, helper) \
5748 static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
5749 {                                                       \
5750     return op_par_addsub_ge(s, a, helper);              \
5751 }
5752 
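/*
 * The SADD16/UADD16-style insns below also update the CPSR.GE flags (later
 * consumed by SEL), which is why their helpers are given a pointer to
 * env->GE; the saturating (Q/UQ) and halving (SH/UH) variants do not.
 */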
5753 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
5754 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
5755 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
5756 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
5757 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
5758 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
5759 
5760 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
5761 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
5762 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
5763 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
5764 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
5765 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
5766 
5767 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
5768 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
5769 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
5770 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
5771 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
5772 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
5773 
5774 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
5775 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
5776 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
5777 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
5778 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
5779 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
5780 
5781 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
5782 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
5783 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
5784 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
5785 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
5786 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
5787 
5788 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
5789 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
5790 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
5791 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
5792 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
5793 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
5794 
5795 #undef DO_PAR_ADDSUB
5796 #undef DO_PAR_ADDSUB_GE
5797 
5798 /*
5799  * Packing, unpacking, saturation, and reversal
5800  */
5801 
5802 static bool trans_PKH(DisasContext *s, arg_PKH *a)
5803 {
5804     TCGv_i32 tn, tm;
5805     int shift = a->imm;
5806 
5807     if (s->thumb
5808         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5809         : !ENABLE_ARCH_6) {
5810         return false;
5811     }
5812 
5813     tn = load_reg(s, a->rn);
5814     tm = load_reg(s, a->rm);
5815     if (a->tb) {
5816         /* PKHTB */
5817         if (shift == 0) {
5818             shift = 31;
5819         }
5820         tcg_gen_sari_i32(tm, tm, shift);
5821         tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
5822     } else {
5823         /* PKHBT */
5824         tcg_gen_shli_i32(tm, tm, shift);
5825         tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
5826     }
5827     store_reg(s, a->rd, tn);
5828     return true;
5829 }
5830 
5831 static bool op_sat(DisasContext *s, arg_sat *a,
5832                    void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5833 {
5834     TCGv_i32 tmp;
5835     int shift = a->imm;
5836 
5837     if (!ENABLE_ARCH_6) {
5838         return false;
5839     }
5840 
5841     tmp = load_reg(s, a->rn);
5842     if (a->sh) {
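        /*
         * A shift amount of 0 here encodes ASR #32; ASR #31 produces the
         * same all-sign-bits result and stays within TCG's legal range.
         */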
5843         tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
5844     } else {
5845         tcg_gen_shli_i32(tmp, tmp, shift);
5846     }
5847 
5848     gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
5849 
5850     store_reg(s, a->rd, tmp);
5851     return true;
5852 }
5853 
5854 static bool trans_SSAT(DisasContext *s, arg_sat *a)
5855 {
5856     return op_sat(s, a, gen_helper_ssat);
5857 }
5858 
5859 static bool trans_USAT(DisasContext *s, arg_sat *a)
5860 {
5861     return op_sat(s, a, gen_helper_usat);
5862 }
5863 
5864 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
5865 {
5866     if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5867         return false;
5868     }
5869     return op_sat(s, a, gen_helper_ssat16);
5870 }
5871 
5872 static bool trans_USAT16(DisasContext *s, arg_sat *a)
5873 {
5874     if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5875         return false;
5876     }
5877     return op_sat(s, a, gen_helper_usat16);
5878 }
5879 
5880 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
5881                    void (*gen_extract)(TCGv_i32, TCGv_i32),
5882                    void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
5883 {
5884     TCGv_i32 tmp;
5885 
5886     if (!ENABLE_ARCH_6) {
5887         return false;
5888     }
5889 
5890     tmp = load_reg(s, a->rm);
5891     /*
5892      * TODO: In many cases we could do a shift instead of a rotate.
5893      * Combined with a simple extend, that becomes an extract.
5894      */
5895     tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
5896     gen_extract(tmp, tmp);
5897 
5898     if (a->rn != 15) {
5899         TCGv_i32 tmp2 = load_reg(s, a->rn);
5900         gen_add(tmp, tmp, tmp2);
5901     }
5902     store_reg(s, a->rd, tmp);
5903     return true;
5904 }
5905 
5906 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
5907 {
5908     return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
5909 }
5910 
5911 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
5912 {
5913     return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
5914 }
5915 
5916 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
5917 {
5918     if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5919         return false;
5920     }
5921     return op_xta(s, a, gen_helper_sxtb16, gen_add16);
5922 }
5923 
5924 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
5925 {
5926     return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
5927 }
5928 
5929 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
5930 {
5931     return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
5932 }
5933 
5934 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
5935 {
5936     if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5937         return false;
5938     }
5939     return op_xta(s, a, gen_helper_uxtb16, gen_add16);
5940 }
5941 
5942 static bool trans_SEL(DisasContext *s, arg_rrr *a)
5943 {
5944     TCGv_i32 t1, t2, t3;
5945 
5946     if (s->thumb
5947         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5948         : !ENABLE_ARCH_6) {
5949         return false;
5950     }
5951 
5952     t1 = load_reg(s, a->rn);
5953     t2 = load_reg(s, a->rm);
5954     t3 = tcg_temp_new_i32();
5955     tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
5956     gen_helper_sel_flags(t1, t3, t1, t2);
5957     store_reg(s, a->rd, t1);
5958     return true;
5959 }
5960 
5961 static bool op_rr(DisasContext *s, arg_rr *a,
5962                   void (*gen)(TCGv_i32, TCGv_i32))
5963 {
5964     TCGv_i32 tmp;
5965 
5966     tmp = load_reg(s, a->rm);
5967     gen(tmp, tmp);
5968     store_reg(s, a->rd, tmp);
5969     return true;
5970 }
5971 
5972 static bool trans_REV(DisasContext *s, arg_rr *a)
5973 {
5974     if (!ENABLE_ARCH_6) {
5975         return false;
5976     }
5977     return op_rr(s, a, tcg_gen_bswap32_i32);
5978 }
5979 
5980 static bool trans_REV16(DisasContext *s, arg_rr *a)
5981 {
5982     if (!ENABLE_ARCH_6) {
5983         return false;
5984     }
5985     return op_rr(s, a, gen_rev16);
5986 }
5987 
5988 static bool trans_REVSH(DisasContext *s, arg_rr *a)
5989 {
5990     if (!ENABLE_ARCH_6) {
5991         return false;
5992     }
5993     return op_rr(s, a, gen_revsh);
5994 }
5995 
5996 static bool trans_RBIT(DisasContext *s, arg_rr *a)
5997 {
5998     if (!ENABLE_ARCH_6T2) {
5999         return false;
6000     }
6001     return op_rr(s, a, gen_helper_rbit);
6002 }
6003 
6004 /*
6005  * Signed multiply, signed and unsigned divide
6006  */
6007 
6008 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6009 {
6010     TCGv_i32 t1, t2;
6011 
6012     if (!ENABLE_ARCH_6) {
6013         return false;
6014     }
6015 
6016     t1 = load_reg(s, a->rn);
6017     t2 = load_reg(s, a->rm);
6018     if (m_swap) {
6019         gen_swap_half(t2, t2);
6020     }
6021     gen_smul_dual(t1, t2);
6022 
6023     if (sub) {
6024         /*
6025          * This subtraction cannot overflow, so we can do a simple
6026          * 32-bit subtraction and then a possible 32-bit saturating
6027          * addition of Ra.
6028          */
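        /*
         * Each 16x16 product lies in [-0x3fff8000, 0x40000000], so the
         * difference is within [-0x7fff8000, 0x7fff8000] and fits in 32 bits.
         */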
6029         tcg_gen_sub_i32(t1, t1, t2);
6030 
6031         if (a->ra != 15) {
6032             t2 = load_reg(s, a->ra);
6033             gen_helper_add_setq(t1, tcg_env, t1, t2);
6034         }
6035     } else if (a->ra == 15) {
6036         /* Single saturation-checking addition */
6037         gen_helper_add_setq(t1, tcg_env, t1, t2);
6038     } else {
6039         /*
6040          * We need to add the products and Ra together and then
6041          * determine whether the final result overflowed. Doing
6042          * this as two separate add-and-check-overflow steps incorrectly
6043          * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
6044          * Do all the arithmetic at 64-bits and then check for overflow.
6045          */
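        /*
         * In that example both products are 0x40000000: the first saturating
         * add would already overflow and set Q, yet the true final sum is
         * 0x7fffffff, which is representable, so Q must stay clear.
         */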
6046         TCGv_i64 p64, q64;
6047         TCGv_i32 t3, qf, one;
6048 
6049         p64 = tcg_temp_new_i64();
6050         q64 = tcg_temp_new_i64();
6051         tcg_gen_ext_i32_i64(p64, t1);
6052         tcg_gen_ext_i32_i64(q64, t2);
6053         tcg_gen_add_i64(p64, p64, q64);
6054         load_reg_var(s, t2, a->ra);
6055         tcg_gen_ext_i32_i64(q64, t2);
6056         tcg_gen_add_i64(p64, p64, q64);
6057 
6058         tcg_gen_extr_i64_i32(t1, t2, p64);
6059         /*
6060          * t1 is the low half of the result which goes into Rd.
6061          * We have overflow and must set Q if the high half (t2)
6062          * is different from the sign-extension of t1.
6063          */
6064         t3 = tcg_temp_new_i32();
6065         tcg_gen_sari_i32(t3, t1, 31);
6066         qf = load_cpu_field(QF);
6067         one = tcg_constant_i32(1);
6068         tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
6069         store_cpu_field(qf, QF);
6070     }
6071     store_reg(s, a->rd, t1);
6072     return true;
6073 }
6074 
6075 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
6076 {
6077     return op_smlad(s, a, false, false);
6078 }
6079 
6080 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
6081 {
6082     return op_smlad(s, a, true, false);
6083 }
6084 
6085 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
6086 {
6087     return op_smlad(s, a, false, true);
6088 }
6089 
6090 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
6091 {
6092     return op_smlad(s, a, true, true);
6093 }
6094 
6095 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6096 {
6097     TCGv_i32 t1, t2;
6098     TCGv_i64 l1, l2;
6099 
6100     if (!ENABLE_ARCH_6) {
6101         return false;
6102     }
6103 
6104     t1 = load_reg(s, a->rn);
6105     t2 = load_reg(s, a->rm);
6106     if (m_swap) {
6107         gen_swap_half(t2, t2);
6108     }
6109     gen_smul_dual(t1, t2);
6110 
6111     l1 = tcg_temp_new_i64();
6112     l2 = tcg_temp_new_i64();
6113     tcg_gen_ext_i32_i64(l1, t1);
6114     tcg_gen_ext_i32_i64(l2, t2);
6115 
6116     if (sub) {
6117         tcg_gen_sub_i64(l1, l1, l2);
6118     } else {
6119         tcg_gen_add_i64(l1, l1, l2);
6120     }
6121 
6122     gen_addq(s, l1, a->ra, a->rd);
6123     gen_storeq_reg(s, a->ra, a->rd, l1);
6124     return true;
6125 }
6126 
6127 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
6128 {
6129     return op_smlald(s, a, false, false);
6130 }
6131 
6132 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
6133 {
6134     return op_smlald(s, a, true, false);
6135 }
6136 
6137 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
6138 {
6139     return op_smlald(s, a, false, true);
6140 }
6141 
6142 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
6143 {
6144     return op_smlald(s, a, true, true);
6145 }
6146 
6147 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
6148 {
6149     TCGv_i32 t1, t2;
6150 
6151     if (s->thumb
6152         ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
6153         : !ENABLE_ARCH_6) {
6154         return false;
6155     }
6156 
6157     t1 = load_reg(s, a->rn);
6158     t2 = load_reg(s, a->rm);
6159     tcg_gen_muls2_i32(t2, t1, t1, t2);
6160 
6161     if (a->ra != 15) {
6162         TCGv_i32 t3 = load_reg(s, a->ra);
6163         if (sub) {
6164             /*
6165              * For SMMLS we need a full 64-bit subtract: it propagates the
6166              * borrow caused by a non-zero multiplicand lowpart, and it
6167              * yields the correct result lowpart for the rounding step.
6168              */
6169             tcg_gen_sub2_i32(t2, t1, tcg_constant_i32(0), t3, t2, t1);
6170         } else {
6171             tcg_gen_add_i32(t1, t1, t3);
6172         }
6173     }
6174     if (round) {
6175         /*
6176          * Adding 0x80000000 to the 64-bit quantity means that we have
6177          * carry in to the high word when the low word has the msb set.
6178          */
6179         tcg_gen_shri_i32(t2, t2, 31);
6180         tcg_gen_add_i32(t1, t1, t2);
6181     }
6182     store_reg(s, a->rd, t1);
6183     return true;
6184 }
6185 
6186 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
6187 {
6188     return op_smmla(s, a, false, false);
6189 }
6190 
6191 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
6192 {
6193     return op_smmla(s, a, true, false);
6194 }
6195 
6196 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
6197 {
6198     return op_smmla(s, a, false, true);
6199 }
6200 
6201 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
6202 {
6203     return op_smmla(s, a, true, true);
6204 }
6205 
6206 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
6207 {
6208     TCGv_i32 t1, t2;
6209 
6210     if (s->thumb
6211         ? !dc_isar_feature(aa32_thumb_div, s)
6212         : !dc_isar_feature(aa32_arm_div, s)) {
6213         return false;
6214     }
6215 
6216     t1 = load_reg(s, a->rn);
6217     t2 = load_reg(s, a->rm);
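    /*
     * The helpers implement the architected corner cases: division by zero
     * yields zero (or, on M-profile with CCR.DIV_0_TRP set, raises an
     * exception, which is why they take the env pointer), and SDIV of
     * INT_MIN by -1 yields INT_MIN.
     */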
6218     if (u) {
6219         gen_helper_udiv(t1, tcg_env, t1, t2);
6220     } else {
6221         gen_helper_sdiv(t1, tcg_env, t1, t2);
6222     }
6223     store_reg(s, a->rd, t1);
6224     return true;
6225 }
6226 
6227 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
6228 {
6229     return op_div(s, a, false);
6230 }
6231 
6232 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
6233 {
6234     return op_div(s, a, true);
6235 }
6236 
6237 /*
6238  * Block data transfer
6239  */
6240 
6241 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
6242 {
6243     TCGv_i32 addr = load_reg(s, a->rn);
6244 
6245     if (a->b) {
6246         if (a->i) {
6247             /* pre increment */
6248             tcg_gen_addi_i32(addr, addr, 4);
6249         } else {
6250             /* pre decrement */
6251             tcg_gen_addi_i32(addr, addr, -(n * 4));
6252         }
6253     } else if (!a->i && n != 1) {
6254         /* post decrement */
6255         tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6256     }
6257 
6258     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6259         /*
6260          * If the writeback is incrementing SP rather than
6261          * decrementing it, and the initial SP is below the
6262          * stack limit but the final written-back SP would
6263          * be above, then we must not perform any memory
6264          * accesses, but it is IMPDEF whether we generate
6265          * an exception. We choose to do so in this case.
6266          * At this point 'addr' is the lowest address, so
6267          * either the original SP (if incrementing) or our
6268          * final SP (if decrementing), so that's what we check.
6269          */
6270         gen_helper_v8m_stackcheck(tcg_env, addr);
6271     }
6272 
6273     return addr;
6274 }
6275 
6276 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
6277                                TCGv_i32 addr, int n)
6278 {
6279     if (a->w) {
6280         /* write back */
6281         if (!a->b) {
6282             if (a->i) {
6283                 /* post increment */
6284                 tcg_gen_addi_i32(addr, addr, 4);
6285             } else {
6286                 /* post decrement */
6287                 tcg_gen_addi_i32(addr, addr, -(n * 4));
6288             }
6289         } else if (!a->i && n != 1) {
6290             /* pre decrement */
6291             tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6292         }
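        /*
         * Illustrative (not in the original source): combined with the
         * per-transfer increments done by the callers, the value written
         * back is Rn + n * 4 for the incrementing forms and Rn - n * 4
         * for the decrementing forms.
         */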
6293         store_reg(s, a->rn, addr);
6294     }
6295 }
6296 
6297 static bool op_stm(DisasContext *s, arg_ldst_block *a)
6298 {
6299     int i, j, n, list, mem_idx;
6300     bool user = a->u;
6301     TCGv_i32 addr, tmp;
6302 
6303     if (user) {
6304         /* STM (user) */
6305         if (IS_USER(s)) {
6306             /* Only usable in supervisor mode.  */
6307             unallocated_encoding(s);
6308             return true;
6309         }
6310     }
6311 
6312     list = a->list;
6313     n = ctpop16(list);
6314     /*
6315      * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6316      * to UNDEF. In the T32 STM encoding n == 1 is also UNPREDICTABLE,
6317      * but hardware treats it like the A32 version and implements the
6318      * single-register-store, and some in-the-wild (buggy) software
6319      * assumes that, so we don't UNDEF on that case.
6320      */
6321     if (n < 1 || a->rn == 15) {
6322         unallocated_encoding(s);
6323         return true;
6324     }
6325 
6326     s->eci_handled = true;
6327 
6328     addr = op_addr_block_pre(s, a, n);
6329     mem_idx = get_mem_index(s);
6330 
6331     for (i = j = 0; i < 16; i++) {
6332         if (!(list & (1 << i))) {
6333             continue;
6334         }
6335 
6336         if (user && i != 15) {
6337             tmp = tcg_temp_new_i32();
6338             gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
6339         } else {
6340             tmp = load_reg(s, i);
6341         }
6342         gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6343 
6344         /* No need to add after the last transfer.  */
6345         if (++j != n) {
6346             tcg_gen_addi_i32(addr, addr, 4);
6347         }
6348     }
6349 
6350     op_addr_block_post(s, a, addr, n);
6351     clear_eci_state(s);
6352     return true;
6353 }
6354 
6355 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
6356 {
6357     return op_stm(s, a);
6358 }
6359 
6360 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
6361 {
6362     /* Writeback register in register list is UNPREDICTABLE for T32.  */
6363     if (a->w && (a->list & (1 << a->rn))) {
6364         unallocated_encoding(s);
6365         return true;
6366     }
6367     return op_stm(s, a);
6368 }
6369 
6370 static bool do_ldm(DisasContext *s, arg_ldst_block *a)
6371 {
6372     int i, j, n, list, mem_idx;
6373     bool loaded_base;
6374     bool user = a->u;
6375     bool exc_return = false;
6376     TCGv_i32 addr, tmp, loaded_var;
6377 
6378     if (user) {
6379         /* LDM (user), LDM (exception return) */
6380         if (IS_USER(s)) {
6381             /* Only usable in supervisor mode.  */
6382             unallocated_encoding(s);
6383             return true;
6384         }
6385         if (extract32(a->list, 15, 1)) {
6386             exc_return = true;
6387             user = false;
6388         } else {
6389             /* LDM (user) does not allow writeback.  */
6390             if (a->w) {
6391                 unallocated_encoding(s);
6392                 return true;
6393             }
6394         }
6395     }
6396 
6397     list = a->list;
6398     n = ctpop16(list);
6399     /*
6400      * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6401      * to UNDEF. In the T32 LDM encoding n == 1 is also UNPREDICTABLE,
6402      * but hardware treats it like the A32 version and implements the
6403      * single-register-load, and some in-the-wild (buggy) software
6404      * assumes that, so we don't UNDEF on that case.
6405      */
6406     if (n < 1 || a->rn == 15) {
6407         unallocated_encoding(s);
6408         return true;
6409     }
6410 
6411     s->eci_handled = true;
6412 
6413     addr = op_addr_block_pre(s, a, n);
6414     mem_idx = get_mem_index(s);
6415     loaded_base = false;
6416     loaded_var = NULL;
6417 
6418     for (i = j = 0; i < 16; i++) {
6419         if (!(list & (1 << i))) {
6420             continue;
6421         }
6422 
6423         tmp = tcg_temp_new_i32();
6424         gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6425         if (user) {
6426             gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
6427         } else if (i == a->rn) {
6428             loaded_var = tmp;
6429             loaded_base = true;
6430         } else if (i == 15 && exc_return) {
6431             store_pc_exc_ret(s, tmp);
6432         } else {
6433             store_reg_from_load(s, i, tmp);
6434         }
6435 
6436         /* No need to add after the last transfer.  */
6437         if (++j != n) {
6438             tcg_gen_addi_i32(addr, addr, 4);
6439         }
6440     }
6441 
6442     op_addr_block_post(s, a, addr, n);
6443 
6444     if (loaded_base) {
6445         /* Note that we reject base == pc above.  */
6446         store_reg(s, a->rn, loaded_var);
6447     }
6448 
6449     if (exc_return) {
6450         /* Restore CPSR from SPSR.  */
6451         tmp = load_cpu_field(spsr);
6452         translator_io_start(&s->base);
6453         gen_helper_cpsr_write_eret(tcg_env, tmp);
6454         /* Must exit loop to check un-masked IRQs */
6455         s->base.is_jmp = DISAS_EXIT;
6456     }
6457     clear_eci_state(s);
6458     return true;
6459 }
6460 
6461 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
6462 {
6463     /*
6464      * Writeback register in register list is UNPREDICTABLE
6465      * for ArchVersion() >= 7.  Prior to v7, A32 would write
6466      * an UNKNOWN value to the base register.
6467      */
6468     if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
6469         unallocated_encoding(s);
6470         return true;
6471     }
6472     return do_ldm(s, a);
6473 }
6474 
6475 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
6476 {
6477     /* Writeback register in register list is UNPREDICTABLE for T32. */
6478     if (a->w && (a->list & (1 << a->rn))) {
6479         unallocated_encoding(s);
6480         return true;
6481     }
6482     return do_ldm(s, a);
6483 }
6484 
6485 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
6486 {
6487     /* Writeback is conditional on the base register not being loaded.  */
6488     a->w = !(a->list & (1 << a->rn));
6489     return do_ldm(s, a);
6490 }
6491 
6492 static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
6493 {
6494     int i;
6495     TCGv_i32 zero;
6496 
6497     if (!dc_isar_feature(aa32_m_sec_state, s)) {
6498         return false;
6499     }
6500 
6501     if (extract32(a->list, 13, 1)) {
6502         return false;
6503     }
6504 
6505     if (!a->list) {
6506         /* UNPREDICTABLE; we choose to UNDEF */
6507         return false;
6508     }
6509 
6510     s->eci_handled = true;
6511 
6512     zero = tcg_constant_i32(0);
6513     for (i = 0; i < 15; i++) {
6514         if (extract32(a->list, i, 1)) {
6515             /* Clear R[i] */
6516             tcg_gen_mov_i32(cpu_R[i], zero);
6517         }
6518     }
6519     if (extract32(a->list, 15, 1)) {
6520         /*
6521          * Clear APSR (by calling the MSR helper with the same argument
6522          * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
6523          */
6524         gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
6525     }
6526     clear_eci_state(s);
6527     return true;
6528 }
6529 
6530 /*
6531  * Branch, branch with link
6532  */
6533 
6534 static bool trans_B(DisasContext *s, arg_i *a)
6535 {
6536     gen_jmp(s, jmp_diff(s, a->imm));
6537     return true;
6538 }
6539 
6540 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
6541 {
6542     /* This has cond from encoding, required to be outside IT block.  */
6543     if (a->cond >= 0xe) {
6544         return false;
6545     }
6546     if (s->condexec_mask) {
6547         unallocated_encoding(s);
6548         return true;
6549     }
6550     arm_skip_unless(s, a->cond);
6551     gen_jmp(s, jmp_diff(s, a->imm));
6552     return true;
6553 }
6554 
6555 static bool trans_BL(DisasContext *s, arg_i *a)
6556 {
6557     gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6558     gen_jmp(s, jmp_diff(s, a->imm));
6559     return true;
6560 }
6561 
6562 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
6563 {
6564     /*
6565      * BLX <imm> would be useless on M-profile; the encoding space
6566      * is used for other insns from v8.1M onward, and UNDEFs before that.
6567      */
6568     if (arm_dc_feature(s, ARM_FEATURE_M)) {
6569         return false;
6570     }
6571 
6572     /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
6573     if (s->thumb && (a->imm & 2)) {
6574         return false;
6575     }
6576     gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6577     store_cpu_field_constant(!s->thumb, thumb);
6578     /* This jump is computed from an aligned PC: subtract off the low bits. */
6579     gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
6580     return true;
6581 }
6582 
6583 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
6584 {
6585     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6586     gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
6587     return true;
6588 }
6589 
6590 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
6591 {
6592     TCGv_i32 tmp = tcg_temp_new_i32();
6593 
6594     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6595     tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
6596     gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6597     gen_bx(s, tmp);
6598     return true;
6599 }
6600 
6601 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
6602 {
6603     TCGv_i32 tmp;
6604 
6605     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6606     if (!ENABLE_ARCH_5) {
6607         return false;
6608     }
6609     tmp = tcg_temp_new_i32();
6610     tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
6611     tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6612     gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6613     gen_bx(s, tmp);
6614     return true;
6615 }
6616 
6617 static bool trans_BF(DisasContext *s, arg_BF *a)
6618 {
6619     /*
6620      * M-profile branch future insns. The architecture permits an
6621      * implementation to implement these as NOPs (equivalent to
6622      * discarding the LO_BRANCH_INFO cache immediately), and we
6623      * take that IMPDEF option because for QEMU a "real" implementation
6624      * would be complicated and wouldn't execute any faster.
6625      */
6626     if (!dc_isar_feature(aa32_lob, s)) {
6627         return false;
6628     }
6629     if (a->boff == 0) {
6630         /* SEE "Related encodings" (loop insns) */
6631         return false;
6632     }
6633     /* Handle as NOP */
6634     return true;
6635 }
6636 
6637 static bool trans_DLS(DisasContext *s, arg_DLS *a)
6638 {
6639     /* M-profile low-overhead loop start */
6640     TCGv_i32 tmp;
6641 
6642     if (!dc_isar_feature(aa32_lob, s)) {
6643         return false;
6644     }
6645     if (a->rn == 13 || a->rn == 15) {
6646         /*
6647          * For DLSTP rn == 15 is a related encoding (LCTP); the
6648          * other cases caught by this condition are all
6649          * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6650          */
6651         return false;
6652     }
6653 
6654     if (a->size != 4) {
6655         /* DLSTP */
6656         if (!dc_isar_feature(aa32_mve, s)) {
6657             return false;
6658         }
6659         if (!vfp_access_check(s)) {
6660             return true;
6661         }
6662     }
6663 
6664     /* Not a while loop: set LR to the count, and set LTPSIZE for DLSTP */
6665     tmp = load_reg(s, a->rn);
6666     store_reg(s, 14, tmp);
6667     if (a->size != 4) {
6668         /* DLSTP: set FPSCR.LTPSIZE */
6669         store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6670         s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6671     }
6672     return true;
6673 }
6674 
6675 static bool trans_WLS(DisasContext *s, arg_WLS *a)
6676 {
6677     /* M-profile low-overhead while-loop start */
6678     TCGv_i32 tmp;
6679     DisasLabel nextlabel;
6680 
6681     if (!dc_isar_feature(aa32_lob, s)) {
6682         return false;
6683     }
6684     if (a->rn == 13 || a->rn == 15) {
6685         /*
6686          * For WLSTP rn == 15 is a related encoding (LE); the
6687          * other cases caught by this condition are all
6688          * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6689          */
6690         return false;
6691     }
6692     if (s->condexec_mask) {
6693         /*
6694          * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
6695          * we choose to UNDEF, because otherwise our use of
6696          * gen_goto_tb(1) would clash with the use of TB exit 1
6697          * in the dc->condjmp condition-failed codepath in
6698          * arm_tr_tb_stop() and we'd get an assertion.
6699          */
6700         return false;
6701     }
6702     if (a->size != 4) {
6703         /* WLSTP */
6704         if (!dc_isar_feature(aa32_mve, s)) {
6705             return false;
6706         }
6707         /*
6708          * We need to check that the FPU is enabled here, but mustn't
6709          * call vfp_access_check() to do that because we don't want to
6710          * do the lazy state preservation in the "loop count is zero" case.
6711          * Do the check-and-raise-exception by hand.
6712          */
6713         if (s->fp_excp_el) {
6714             gen_exception_insn_el(s, 0, EXCP_NOCP,
6715                                   syn_uncategorized(), s->fp_excp_el);
6716             return true;
6717         }
6718     }
6719 
6720     nextlabel = gen_disas_label(s);
6721     tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
6722     tmp = load_reg(s, a->rn);
6723     store_reg(s, 14, tmp);
6724     if (a->size != 4) {
6725         /*
6726          * WLSTP: set FPSCR.LTPSIZE. This requires that we do the
6727          * lazy state preservation, new FP context creation, etc,
6728          * that vfp_access_check() does. We know that the actual
6729          * access check will succeed (ie it won't generate code that
6730          * throws an exception) because we did that check by hand earlier.
6731          */
6732         bool ok = vfp_access_check(s);
6733         assert(ok);
6734         store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6735         /*
6736          * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
6737          * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
6738          */
6739     }
6740     gen_jmp_tb(s, curr_insn_len(s), 1);
6741 
6742     set_disas_label(s, nextlabel);
6743     gen_jmp(s, jmp_diff(s, a->imm));
6744     return true;
6745 }
6746 
6747 static bool trans_LE(DisasContext *s, arg_LE *a)
6748 {
6749     /*
6750      * M-profile low-overhead loop end. The architecture permits an
6751      * implementation to discard the LO_BRANCH_INFO cache at any time,
6752      * and we take the IMPDEF option to never set it in the first place
6753      * (equivalent to always discarding it immediately), because for QEMU
6754      * a "real" implementation would be complicated and wouldn't execute
6755      * any faster.
6756      */
6757     TCGv_i32 tmp;
6758     DisasLabel loopend;
6759     bool fpu_active;
6760 
6761     if (!dc_isar_feature(aa32_lob, s)) {
6762         return false;
6763     }
6764     if (a->f && a->tp) {
6765         return false;
6766     }
6767     if (s->condexec_mask) {
6768         /*
6769          * LE in an IT block is CONSTRAINED UNPREDICTABLE;
6770          * we choose to UNDEF, because otherwise our use of
6771          * gen_goto_tb(1) would clash with the use of TB exit 1
6772          * in the dc->condjmp condition-failed codepath in
6773          * arm_tr_tb_stop() and we'd get an assertion.
6774          */
6775         return false;
6776     }
6777     if (a->tp) {
6778         /* LETP */
6779         if (!dc_isar_feature(aa32_mve, s)) {
6780             return false;
6781         }
6782         if (!vfp_access_check(s)) {
6783             s->eci_handled = true;
6784             return true;
6785         }
6786     }
6787 
6788     /* LE/LETP is OK with ECI set and leaves it untouched */
6789     s->eci_handled = true;
6790 
6791     /*
6792      * With MVE, LTPSIZE might not be 4, and we must emit an INVSTATE
6793      * UsageFault exception for the LE insn in that case. Note that we
6794      * are not directly checking FPSCR.LTPSIZE but instead check the
6795      * pseudocode LTPSIZE() function, which returns 4 if the FPU is
6796      * not currently active (ie ActiveFPState() returns false). We
6797      * can identify not-active purely from our TB state flags, as the
6798      * FPU is active only if:
6799      *  the FPU is enabled
6800      *  AND lazy state preservation is not active
6801      *  AND we do not need a new fp context (this is the ASPEN/FPCA check)
6802      *
6803      * Usually we don't need to care about this distinction between
6804      * LTPSIZE and FPSCR.LTPSIZE, because the code in vfp_access_check()
6805      * will either take an exception or clear the conditions that make
6806      * the FPU not active. But LE is an unusual case of a non-FP insn
6807      * that looks at LTPSIZE.
6808      */
6809     fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed;
6810 
6811     if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
6812         /* Need to do a runtime check for LTPSIZE != 4 */
6813         DisasLabel skipexc = gen_disas_label(s);
6814         tmp = load_cpu_field(v7m.ltpsize);
6815         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
6816         gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
6817         set_disas_label(s, skipexc);
6818     }
6819 
6820     if (a->f) {
6821         /* Loop-forever: just jump back to the loop start */
6822         gen_jmp(s, jmp_diff(s, -a->imm));
6823         return true;
6824     }
6825 
6826     /*
6827      * Not loop-forever. If LR <= loop-decrement-value this is the last loop.
6828      * For LE, we know at this point that LTPSIZE must be 4 and the
6829      * loop decrement value is 1. For LETP we need to calculate the decrement
6830      * value from LTPSIZE.
6831      */
6832     loopend = gen_disas_label(s);
6833     if (!a->tp) {
6834         tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
6835         tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
6836     } else {
6837         /*
6838          * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
6839          * so that decr stays live after the brcondi.
6840          */
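        /*
         * Illustrative (not in the original source): LTPSIZE encodes the
         * element size, so e.g. LTPSIZE == 0 (byte elements) gives a
         * decrement of 16 per iteration and LTPSIZE == 2 (word elements)
         * gives 4.
         */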
6841         TCGv_i32 decr = tcg_temp_new_i32();
6842         TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
6843         tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
6844         tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
6845 
6846         tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);
6847 
6848         tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
6849     }
6850     /* Jump back to the loop start */
6851     gen_jmp(s, jmp_diff(s, -a->imm));
6852 
6853     set_disas_label(s, loopend);
6854     if (a->tp) {
6855         /* Exits from tail-pred loops must reset LTPSIZE to 4 */
6856         store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
6857     }
6858     /* End TB, continuing to following insn */
6859     gen_jmp_tb(s, curr_insn_len(s), 1);
6860     return true;
6861 }
6862 
6863 static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
6864 {
6865     /*
6866      * M-profile Loop Clear with Tail Predication. Since our implementation
6867      * doesn't cache branch information, all we need to do is reset
6868      * FPSCR.LTPSIZE to 4.
6869      */
6870 
6871     if (!dc_isar_feature(aa32_lob, s) ||
6872         !dc_isar_feature(aa32_mve, s)) {
6873         return false;
6874     }
6875 
6876     if (!vfp_access_check(s)) {
6877         return true;
6878     }
6879 
6880     store_cpu_field_constant(4, v7m.ltpsize);
6881     return true;
6882 }
6883 
6884 static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
6885 {
6886     /*
6887      * M-profile Create Vector Tail Predicate. This insn is itself
6888      * predicated and is subject to beatwise execution.
6889      */
6890     TCGv_i32 rn_shifted, masklen;
6891 
6892     if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
6893         return false;
6894     }
6895 
6896     if (!mve_eci_check(s) || !vfp_access_check(s)) {
6897         return true;
6898     }
6899 
6900     /*
6901      * We pre-calculate the mask length here to avoid having
6902      * to have multiple helpers specialized for size.
6903      * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
6904      */
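    /*
     * Worked example (illustrative, not in the original source): with
     * a->size == 1 the limit is 1 << (4 - 1) == 8, so rn == 5 yields
     * masklen == 10 (5 halfwords predicated) while rn == 9 saturates to
     * the full 16-byte vector (masklen == 16).
     */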
6905     rn_shifted = tcg_temp_new_i32();
6906     masklen = load_reg(s, a->rn);
6907     tcg_gen_shli_i32(rn_shifted, masklen, a->size);
6908     tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
6909                         masklen, tcg_constant_i32(1 << (4 - a->size)),
6910                         rn_shifted, tcg_constant_i32(16));
6911     gen_helper_mve_vctp(tcg_env, masklen);
6912     /* This insn updates predication bits */
6913     s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6914     mve_update_eci(s);
6915     return true;
6916 }
6917 
6918 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
6919 {
6920     TCGv_i32 addr, tmp;
6921 
6922     tmp = load_reg(s, a->rm);
6923     if (half) {
6924         tcg_gen_add_i32(tmp, tmp, tmp);
6925     }
6926     addr = load_reg(s, a->rn);
6927     tcg_gen_add_i32(addr, addr, tmp);
6928 
6929     gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
6930 
6931     tcg_gen_add_i32(tmp, tmp, tmp);
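    /*
     * Illustrative (not in the original source): the loaded table entry
     * is a halfword count, so e.g. an entry of 3 branches 6 bytes beyond
     * the architectural PC (this insn's address + 4) computed below.
     */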
6932     gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
6933     tcg_gen_add_i32(tmp, tmp, addr);
6934     store_reg(s, 15, tmp);
6935     return true;
6936 }
6937 
6938 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
6939 {
6940     return op_tbranch(s, a, false);
6941 }
6942 
6943 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
6944 {
6945     return op_tbranch(s, a, true);
6946 }
6947 
6948 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
6949 {
6950     TCGv_i32 tmp = load_reg(s, a->rn);
6951 
6952     arm_gen_condlabel(s);
6953     tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
6954                         tmp, 0, s->condlabel.label);
6955     gen_jmp(s, jmp_diff(s, a->imm));
6956     return true;
6957 }
6958 
6959 /*
6960  * Supervisor call - both T32 & A32 come here so we need to check
6961  * which mode we are in when checking for semihosting.
6962  */
6963 
6964 static bool trans_SVC(DisasContext *s, arg_SVC *a)
6965 {
6966     const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
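    /*
     * Note (not in the original source): these are the architected
     * semihosting call immediates, SVC 0xab for T32 and SVC 0x123456
     * for A32.
     */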
6967 
6968     if (!arm_dc_feature(s, ARM_FEATURE_M) &&
6969         semihosting_enabled(s->current_el == 0) &&
6970         (a->imm == semihost_imm)) {
6971         gen_exception_internal_insn(s, EXCP_SEMIHOST);
6972     } else {
6973         if (s->fgt_svc) {
6974             uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb);
6975             gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
6976         } else {
6977             gen_update_pc(s, curr_insn_len(s));
6978             s->svc_imm = a->imm;
6979             s->base.is_jmp = DISAS_SWI;
6980         }
6981     }
6982     return true;
6983 }
6984 
6985 /*
6986  * Unconditional system instructions
6987  */
6988 
6989 static bool trans_RFE(DisasContext *s, arg_RFE *a)
6990 {
6991     static const int8_t pre_offset[4] = {
6992         /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
6993     };
6994     static const int8_t post_offset[4] = {
6995         /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
6996     };
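    /*
     * Illustrative (not in the original source): for RFEIA this loads
     * the PC from [Rn] and CPSR from [Rn + 4], writing back Rn + 8;
     * for RFEDB it loads from [Rn - 8] and [Rn - 4], writing back Rn - 8.
     */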
6997     TCGv_i32 addr, t1, t2;
6998 
6999     if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7000         return false;
7001     }
7002     if (IS_USER(s)) {
7003         unallocated_encoding(s);
7004         return true;
7005     }
7006 
7007     addr = load_reg(s, a->rn);
7008     tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
7009 
7010     /* Load PC into tmp and CPSR into tmp2.  */
7011     t1 = tcg_temp_new_i32();
7012     gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7013     tcg_gen_addi_i32(addr, addr, 4);
7014     t2 = tcg_temp_new_i32();
7015     gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7016 
7017     if (a->w) {
7018         /* Base writeback.  */
7019         tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
7020         store_reg(s, a->rn, addr);
7021     }
7022     gen_rfe(s, t1, t2);
7023     return true;
7024 }
7025 
7026 static bool trans_SRS(DisasContext *s, arg_SRS *a)
7027 {
7028     if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7029         return false;
7030     }
7031     gen_srs(s, a->mode, a->pu, a->w);
7032     return true;
7033 }
7034 
7035 static bool trans_CPS(DisasContext *s, arg_CPS *a)
7036 {
7037     uint32_t mask, val;
7038 
7039     if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7040         return false;
7041     }
7042     if (IS_USER(s)) {
7043         /* Implemented as NOP in user mode.  */
7044         return true;
7045     }
7046     /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
7047 
7048     mask = val = 0;
7049     if (a->imod & 2) {
7050         if (a->A) {
7051             mask |= CPSR_A;
7052         }
7053         if (a->I) {
7054             mask |= CPSR_I;
7055         }
7056         if (a->F) {
7057             mask |= CPSR_F;
7058         }
7059         if (a->imod & 1) {
7060             val |= mask;
7061         }
7062     }
7063     if (a->M) {
7064         mask |= CPSR_M;
7065         val |= a->mode;
7066     }
7067     if (mask) {
7068         gen_set_psr_im(s, mask, 0, val);
7069     }
7070     return true;
7071 }
7072 
7073 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
7074 {
7075     TCGv_i32 tmp, addr;
7076 
7077     if (!arm_dc_feature(s, ARM_FEATURE_M)) {
7078         return false;
7079     }
7080     if (IS_USER(s)) {
7081         /* Implemented as NOP in user mode.  */
7082         return true;
7083     }
7084 
7085     tmp = tcg_constant_i32(a->im);
7086     /* FAULTMASK */
7087     if (a->F) {
7088         addr = tcg_constant_i32(19);
7089         gen_helper_v7m_msr(tcg_env, addr, tmp);
7090     }
7091     /* PRIMASK */
7092     if (a->I) {
7093         addr = tcg_constant_i32(16);
7094         gen_helper_v7m_msr(tcg_env, addr, tmp);
7095     }
7096     gen_rebuild_hflags(s, false);
7097     gen_lookup_tb(s);
7098     return true;
7099 }
7100 
7101 /*
7102  * Clear-Exclusive, Barriers
7103  */
7104 
7105 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
7106 {
7107     if (s->thumb
7108         ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
7109         : !ENABLE_ARCH_6K) {
7110         return false;
7111     }
7112     gen_clrex(s);
7113     return true;
7114 }
7115 
7116 static bool trans_DSB(DisasContext *s, arg_DSB *a)
7117 {
7118     if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7119         return false;
7120     }
7121     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7122     return true;
7123 }
7124 
7125 static bool trans_DMB(DisasContext *s, arg_DMB *a)
7126 {
7127     return trans_DSB(s, NULL);
7128 }
7129 
7130 static bool trans_ISB(DisasContext *s, arg_ISB *a)
7131 {
7132     if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7133         return false;
7134     }
7135     /*
7136      * We need to break the TB after this insn to execute
7137      * self-modifying code correctly and also to take
7138      * any pending interrupts immediately.
7139      */
7140     s->base.is_jmp = DISAS_TOO_MANY;
7141     return true;
7142 }
7143 
7144 static bool trans_SB(DisasContext *s, arg_SB *a)
7145 {
7146     if (!dc_isar_feature(aa32_sb, s)) {
7147         return false;
7148     }
7149     /*
7150      * TODO: There is no speculation barrier opcode
7151      * for TCG; MB and end the TB instead.
7152      */
7153     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7154     s->base.is_jmp = DISAS_TOO_MANY;
7155     return true;
7156 }
7157 
7158 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
7159 {
7160     if (!ENABLE_ARCH_6) {
7161         return false;
7162     }
7163     if (a->E != (s->be_data == MO_BE)) {
7164         gen_helper_setend(tcg_env);
7165         s->base.is_jmp = DISAS_UPDATE_EXIT;
7166     }
7167     return true;
7168 }
7169 
7170 /*
7171  * Preload instructions
7172  * All are nops, contingent on the appropriate arch level.
7173  */
7174 
7175 static bool trans_PLD(DisasContext *s, arg_PLD *a)
7176 {
7177     return ENABLE_ARCH_5TE;
7178 }
7179 
7180 static bool trans_PLDW(DisasContext *s, arg_PLDW *a)
7181 {
7182     return arm_dc_feature(s, ARM_FEATURE_V7MP);
7183 }
7184 
7185 static bool trans_PLI(DisasContext *s, arg_PLI *a)
7186 {
7187     return ENABLE_ARCH_7;
7188 }
7189 
7190 /*
7191  * If-then
7192  */
7193 
7194 static bool trans_IT(DisasContext *s, arg_IT *a)
7195 {
7196     int cond_mask = a->cond_mask;
7197 
7198     /*
7199      * No actual code generated for this insn, just setup state.
7200      *
7201      * Combinations of firstcond and mask which set up an 0b1111
7202      * condition are UNPREDICTABLE; we take the CONSTRAINED
7203      * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
7204      * i.e. both meaning "execute always".
7205      */
7206     s->condexec_cond = (cond_mask >> 4) & 0xe;
7207     s->condexec_mask = cond_mask & 0x1f;
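    /*
     * Illustrative (not in the original source): for "IT EQ"
     * (firstcond 0b0000, mask 0b1000) this sets condexec_cond = 0 and
     * condexec_mask = 0b01000; the mask portion then advances as each
     * insn in the IT block is translated.
     */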
7208     return true;
7209 }
7210 
7211 /* v8.1M CSEL/CSINC/CSNEG/CSINV */
7212 static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
7213 {
7214     TCGv_i32 rn, rm;
7215     DisasCompare c;
7216 
7217     if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
7218         return false;
7219     }
7220 
7221     if (a->rm == 13) {
7222         /* SEE "Related encodings" (MVE shifts) */
7223         return false;
7224     }
7225 
7226     if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
7227         /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
7228         return false;
7229     }
7230 
7231     /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
7232     rn = tcg_temp_new_i32();
7233     rm = tcg_temp_new_i32();
7234     if (a->rn == 15) {
7235         tcg_gen_movi_i32(rn, 0);
7236     } else {
7237         load_reg_var(s, rn, a->rn);
7238     }
7239     if (a->rm == 15) {
7240         tcg_gen_movi_i32(rm, 0);
7241     } else {
7242         load_reg_var(s, rm, a->rm);
7243     }
7244 
7245     switch (a->op) {
7246     case 0: /* CSEL */
7247         break;
7248     case 1: /* CSINC */
7249         tcg_gen_addi_i32(rm, rm, 1);
7250         break;
7251     case 2: /* CSINV */
7252         tcg_gen_not_i32(rm, rm);
7253         break;
7254     case 3: /* CSNEG */
7255         tcg_gen_neg_i32(rm, rm);
7256         break;
7257     default:
7258         g_assert_not_reached();
7259     }
7260 
7261     arm_test_cc(&c, a->fcond);
7262     tcg_gen_movcond_i32(c.cond, rn, c.value, tcg_constant_i32(0), rn, rm);
7263 
7264     store_reg(s, a->rd, rn);
7265     return true;
7266 }
7267 
7268 /*
7269  * Legacy decoder.
7270  */
7271 
7272 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7273 {
7274     unsigned int cond = insn >> 28;
7275 
7276     /* M variants do not implement ARM mode; this must raise the INVSTATE
7277      * UsageFault exception.
7278      */
7279     if (arm_dc_feature(s, ARM_FEATURE_M)) {
7280         gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
7281         return;
7282     }
7283 
7284     if (s->pstate_il) {
7285         /*
7286          * Illegal execution state. This has priority over BTI
7287          * exceptions, but comes after instruction abort exceptions.
7288          */
7289         gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
7290         return;
7291     }
7292 
7293     if (cond == 0xf) {
7294         /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7295          * choose to UNDEF. In ARMv5 and above the space is used
7296          * for miscellaneous unconditional instructions.
7297          */
7298         if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
7299             unallocated_encoding(s);
7300             return;
7301         }
7302 
7303         /* Unconditional instructions.  */
7304         /* TODO: Perhaps merge these into one decodetree output file.  */
7305         if (disas_a32_uncond(s, insn) ||
7306             disas_vfp_uncond(s, insn) ||
7307             disas_neon_dp(s, insn) ||
7308             disas_neon_ls(s, insn) ||
7309             disas_neon_shared(s, insn)) {
7310             return;
7311         }
7312         /* fall back to legacy decoder */
7313 
7314         if ((insn & 0x0e000f00) == 0x0c000100) {
7315             if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7316                 /* iWMMXt register transfer.  */
7317                 if (extract32(s->c15_cpar, 1, 1)) {
7318                     if (!disas_iwmmxt_insn(s, insn)) {
7319                         return;
7320                     }
7321                 }
7322             }
7323         }
7324         goto illegal_op;
7325     }
7326     if (cond != 0xe) {
7327         /* if not always execute, generate a conditional jump to the
7328            next instruction */
7329         arm_skip_unless(s, cond);
7330     }
7331 
7332     /* TODO: Perhaps merge these into one decodetree output file.  */
7333     if (disas_a32(s, insn) ||
7334         disas_vfp(s, insn)) {
7335         return;
7336     }
7337     /* fall back to legacy decoder */
7338     /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
7339     if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7340         if (((insn & 0x0c000e00) == 0x0c000000)
7341             && ((insn & 0x03000000) != 0x03000000)) {
7342             /* Coprocessor insn, coprocessor 0 or 1 */
7343             disas_xscale_insn(s, insn);
7344             return;
7345         }
7346     }
7347 
7348 illegal_op:
7349     unallocated_encoding(s);
7350 }
7351 
7352 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
7353 {
7354     /*
7355      * Return true if this is a 16 bit instruction. We must be precise
7356      * about this (matching the decode).
7357      */
7358     if ((insn >> 11) < 0x1d) {
7359         /* Definitely a 16-bit instruction */
7360         return true;
7361     }
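    /*
     * Illustrative (not in the original source): 0xe7fe ("B .") has
     * (insn >> 11) == 0x1c and is treated above as 16-bit, while 0xf000
     * (a BL/BLX prefix halfword) has (insn >> 11) == 0x1e and falls
     * through to the checks below.
     */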
7362 
7363     /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
7364      * first half of a 32-bit Thumb insn. Thumb-1 cores might
7365      * end up actually treating this as two 16-bit insns, though,
7366      * if it's half of a bl/blx pair that might span a page boundary.
7367      */
7368     if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
7369         arm_dc_feature(s, ARM_FEATURE_M)) {
7370         /* Thumb2 cores (including all M profile ones) always treat
7371          * 32-bit insns as 32-bit.
7372          */
7373         return false;
7374     }
7375 
7376     if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
7377         /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
7378          * is not on the next page; we merge this into a 32-bit
7379          * insn.
7380          */
7381         return false;
7382     }
7383     /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
7384      * 0b1111_1xxx_xxxx_xxxx : BL suffix;
7385      * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
7386      *  -- handle as single 16 bit insn
7387      */
7388     return true;
7389 }
7390 
7391 /* Translate a 32-bit thumb instruction. */
7392 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
7393 {
7394     /*
7395      * ARMv6-M supports a limited subset of Thumb2 instructions.
7396      * Other Thumb1 architectures allow only 32-bit
7397      * combined BL/BLX prefix and suffix.
7398      */
7399     if (arm_dc_feature(s, ARM_FEATURE_M) &&
7400         !arm_dc_feature(s, ARM_FEATURE_V7)) {
7401         int i;
7402         bool found = false;
7403         static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
7404                                                0xf3b08040 /* dsb */,
7405                                                0xf3b08050 /* dmb */,
7406                                                0xf3b08060 /* isb */,
7407                                                0xf3e08000 /* mrs */,
7408                                                0xf000d000 /* bl */};
7409         static const uint32_t armv6m_mask[] = {0xffe0d000,
7410                                                0xfff0d0f0,
7411                                                0xfff0d0f0,
7412                                                0xfff0d0f0,
7413                                                0xffe0d000,
7414                                                0xf800d000};
7415 
7416         for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
7417             if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
7418                 found = true;
7419                 break;
7420             }
7421         }
7422         if (!found) {
7423             goto illegal_op;
7424         }
7425     } else if ((insn & 0xf800e800) != 0xf000e800)  {
7426         if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
7427             unallocated_encoding(s);
7428             return;
7429         }
7430     }
7431 
7432     if (arm_dc_feature(s, ARM_FEATURE_M)) {
7433         /*
7434          * NOCP takes precedence over any UNDEF for (almost) the
7435          * entire wide range of coprocessor-space encodings, so check
7436          * for it first before proceeding to actually decode eg VFP
7437          * insns. This decode also handles the few insns which are
7438          * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
7439          */
7440         if (disas_m_nocp(s, insn)) {
7441             return;
7442         }
7443     }
7444 
7445     if ((insn & 0xef000000) == 0xef000000) {
7446         /*
7447          * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7448          * transform into
7449          * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7450          */
7451         uint32_t a32_insn = (insn & 0xe2ffffff) |
7452             ((insn & (1 << 28)) >> 4) | (1 << 28);
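        /*
         * Illustrative (not in the original source): a T32 insn with top
         * byte 0xef (p == 0) becomes an A32 insn with top byte 0xf2, and
         * one with top byte 0xff (p == 1) becomes 0xf3.
         */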
7453 
7454         if (disas_neon_dp(s, a32_insn)) {
7455             return;
7456         }
7457     }
7458 
7459     if ((insn & 0xff100000) == 0xf9000000) {
7460         /*
7461          * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7462          * transform into
7463          * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7464          */
7465         uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
7466 
7467         if (disas_neon_ls(s, a32_insn)) {
7468             return;
7469         }
7470     }
7471 
7472     /*
7473      * TODO: Perhaps merge these into one decodetree output file.
7474      * Note disas_vfp is written for a32 with cond field in the
7475      * top nibble.  The t32 encoding requires 0xe in the top nibble.
7476      */
7477     if (disas_t32(s, insn) ||
7478         disas_vfp_uncond(s, insn) ||
7479         disas_neon_shared(s, insn) ||
7480         disas_mve(s, insn) ||
7481         ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
7482         return;
7483     }
7484 
7485 illegal_op:
7486     unallocated_encoding(s);
7487 }
7488 
7489 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
7490 {
7491     if (!disas_t16(s, insn)) {
7492         unallocated_encoding(s);
7493     }
7494 }
7495 
7496 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
7497 {
7498     /* Return true if the insn at dc->base.pc_next might cross a page boundary.
7499      * (False positives are OK, false negatives are not.)
7500      * We know this is a Thumb insn, and our caller ensures we are
7501      * only called if dc->base.pc_next is less than 4 bytes from the page
7502      * boundary, so we cross the page if the first 16 bits indicate
7503      * that this is a 32 bit insn.
7504      */
7505     uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
7506 
7507     return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
7508 }
7509 
7510 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
7511 {
7512     DisasContext *dc = container_of(dcbase, DisasContext, base);
7513     CPUARMState *env = cpu_env(cs);
7514     ARMCPU *cpu = env_archcpu(env);
7515     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
7516     uint32_t condexec, core_mmu_idx;
7517 
7518     dc->isar = &cpu->isar;
7519     dc->condjmp = 0;
7520     dc->pc_save = dc->base.pc_first;
7521     dc->aarch64 = false;
7522     dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
7523     dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
7524     condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
7525     /*
7526      * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this
7527      * is always the IT bits. On M-profile, some of the reserved encodings
7528      * of IT are used instead to indicate either ICI or ECI, which
7529      * indicate partial progress of a restartable insn that was interrupted
7530      * partway through by an exception:
7531      *  * if CONDEXEC[3:0] != 0b0000 : CONDEXEC is IT bits
7532      *  * if CONDEXEC[3:0] == 0b0000 : CONDEXEC is ICI or ECI bits
7533      * In all cases CONDEXEC == 0 means "not in IT block or restartable
7534      * insn, behave normally".
7535      */
7536     dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
7537     dc->eci_handled = false;
7538     if (condexec & 0xf) {
7539         dc->condexec_mask = (condexec & 0xf) << 1;
7540         dc->condexec_cond = condexec >> 4;
7541     } else {
7542         if (arm_feature(env, ARM_FEATURE_M)) {
7543             dc->eci = condexec >> 4;
7544         }
7545     }
7546 
7547     core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
7548     dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
7549     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
7550 #if !defined(CONFIG_USER_ONLY)
7551     dc->user = (dc->current_el == 0);
7552 #endif
7553     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
7554     dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
7555     dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
7556     dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
7557     dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
7558 
7559     if (arm_feature(env, ARM_FEATURE_M)) {
7560         dc->vfp_enabled = 1;
7561         dc->be_data = MO_TE;
7562         dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
7563         dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
7564         dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
7565         dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
7566         dc->v7m_new_fp_ctxt_needed =
7567             EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
7568         dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
7569         dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
7570     } else {
7571         dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
7572         dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
7573         dc->ns = EX_TBFLAG_A32(tb_flags, NS);
7574         dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
7575         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7576             dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
7577         } else {
7578             dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
7579             dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
7580         }
7581         dc->sme_trap_nonstreaming =
7582             EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
7583     }
7584     dc->lse2 = false; /* applies only to aarch64 */
7585     dc->cp_regs = cpu->cp_regs;
7586     dc->features = env->features;
7587 
7588     /* Single step state. The code-generation logic here is:
7589      *  SS_ACTIVE == 0:
7590      *   generate code with no special handling for single-stepping (except
7591      *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
7592      *   this happens anyway because those changes are all system register or
7593      *   PSTATE writes).
7594      *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
7595      *   emit code for one insn
7596      *   emit code to clear PSTATE.SS
7597      *   emit code to generate software step exception for completed step
7598      *   end TB (as usual for having generated an exception)
7599      *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
7600      *   emit code to generate a software step exception
7601      *   end the TB
7602      */
7603     dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
7604     dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
7605     dc->is_ldex = false;
7606 
7607     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
7608 
7609     /* If architectural single step active, limit to 1.  */
7610     if (dc->ss_active) {
7611         dc->base.max_insns = 1;
7612     }
7613 
7614     /* ARM is a fixed-length ISA.  Bound the number of insns to execute
7615        to those left on the page.  */
7616     if (!dc->thumb) {
7617         int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
7618         dc->base.max_insns = MIN(dc->base.max_insns, bound);
7619     }
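    /*
     * Illustrative (not in the original source): with 4 KiB pages and
     * pc_first at page offset 0xff8, -(pc_first | TARGET_PAGE_MASK) == 8,
     * so at most two more 4-byte A32 insns are translated before the
     * page boundary.
     */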
7620 
7621     cpu_V0 = tcg_temp_new_i64();
7622     cpu_V1 = tcg_temp_new_i64();
7623     cpu_M0 = tcg_temp_new_i64();
7624 }
7625 
7626 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
7627 {
7628     DisasContext *dc = container_of(dcbase, DisasContext, base);
7629 
7630     /* A note on handling of the condexec (IT) bits:
7631      *
7632      * We want to avoid the overhead of having to write the updated condexec
7633      * bits back to the CPUARMState for every instruction in an IT block. So:
7634      * (1) if the condexec bits are not already zero then we write
7635      * zero back into the CPUARMState now. This avoids complications trying
7636      * to do it at the end of the block. (For example if we don't do this
7637      * it's hard to identify whether we can safely skip writing condexec
7638      * at the end of the TB, which we definitely want to do for the case
7639      * where a TB doesn't do anything with the IT state at all.)
7640      * (2) if we are going to leave the TB then we call gen_set_condexec()
7641      * which will write the correct value into CPUARMState if zero is wrong.
7642      * This is done both for leaving the TB at the end, and for leaving
7643      * it because of an exception we know will happen, which is done in
7644      * gen_exception_insn(). The latter is necessary because we need to
7645      * leave the TB with the PC/IT state just prior to execution of the
7646      * instruction which caused the exception.
7647      * (3) if we leave the TB unexpectedly (eg a data abort on a load)
7648      * then the CPUARMState will be wrong and we need to reset it.
7649      * This is handled in the same way as restoration of the
7650      * PC in these situations; we save the value of the condexec bits
7651      * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
7652      * then uses this to restore them after an exception.
7653      *
7654      * Note that there are no instructions which can read the condexec
7655      * bits, and none which can write non-static values to them, so
7656      * we don't need to care about whether CPUARMState is correct in the
7657      * middle of a TB.
7658      */
7659 
7660     /* Reset the conditional execution bits immediately. This avoids
7661        complications trying to do it at the end of the block.  */
7662     if (dc->condexec_mask || dc->condexec_cond) {
7663         store_cpu_field_constant(0, condexec_bits);
7664     }
7665 }
7666 
7667 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
7668 {
7669     DisasContext *dc = container_of(dcbase, DisasContext, base);
7670     /*
7671      * The ECI/ICI bits share PSR bits with the IT bits, so we
7672      * need to reconstitute the bits from the split-out DisasContext
7673      * fields here.
7674      */
7675     uint32_t condexec_bits;
7676     target_ulong pc_arg = dc->base.pc_next;
7677 
7678     if (tb_cflags(dcbase->tb) & CF_PCREL) {
7679         pc_arg &= ~TARGET_PAGE_MASK;
7680     }
7681     if (dc->eci) {
7682         condexec_bits = dc->eci << 4;
7683     } else {
7684         condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
7685     }
7686     tcg_gen_insn_start(pc_arg, condexec_bits, 0);
7687     dc->insn_start_updated = false;
7688 }
7689 
7690 static bool arm_check_kernelpage(DisasContext *dc)
7691 {
7692 #ifdef CONFIG_USER_ONLY
7693     /* Intercept jump to the magic kernel page.  */
7694     if (dc->base.pc_next >= 0xffff0000) {
7695         /* We always get here via a jump, so we know we are not in a
7696            conditional execution block.  */
7697         gen_exception_internal(EXCP_KERNEL_TRAP);
7698         dc->base.is_jmp = DISAS_NORETURN;
7699         return true;
7700     }
7701 #endif
7702     return false;
7703 }
7704 
7705 static bool arm_check_ss_active(DisasContext *dc)
7706 {
7707     if (dc->ss_active && !dc->pstate_ss) {
7708         /* Singlestep state is Active-pending.
7709          * If we're in this state at the start of a TB then either
7710          *  a) we just took an exception to an EL which is being debugged
7711          *     and this is the first insn in the exception handler
7712          *  b) debug exceptions were masked and we just unmasked them
7713          *     without changing EL (eg by clearing PSTATE.D)
7714          * In either case we're going to take a swstep exception in the
7715          * "did not step an insn" case, and so the syndrome ISV and EX
7716          * bits should be zero.
7717          */
7718         assert(dc->base.num_insns == 1);
7719         gen_swstep_exception(dc, 0, 0);
7720         dc->base.is_jmp = DISAS_NORETURN;
7721         return true;
7722     }
7723 
7724     return false;
7725 }
7726 
7727 static void arm_post_translate_insn(DisasContext *dc)
7728 {
7729     if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
7730         if (dc->pc_save != dc->condlabel.pc_save) {
7731             gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
7732         }
7733         gen_set_label(dc->condlabel.label);
7734         dc->condjmp = 0;
7735     }
7736 }
7737 
7738 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7739 {
7740     DisasContext *dc = container_of(dcbase, DisasContext, base);
7741     CPUARMState *env = cpu_env(cpu);
7742     uint32_t pc = dc->base.pc_next;
7743     unsigned int insn;
7744 
7745     /* Singlestep exceptions have the highest priority. */
7746     if (arm_check_ss_active(dc)) {
7747         dc->base.pc_next = pc + 4;
7748         return;
7749     }
7750 
7751     if (pc & 3) {
7752         /*
7753          * PC alignment fault.  This has priority over the instruction abort
7754          * that we would receive from a translation fault via arm_ldl_code
7755          * (or the execution of the kernelpage entrypoint). This should only
7756          * be possible after an indirect branch, at the start of the TB.
7757          */
7758         assert(dc->base.num_insns == 1);
7759         gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
7760         dc->base.is_jmp = DISAS_NORETURN;
7761         dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
7762         return;
7763     }
7764 
7765     if (arm_check_kernelpage(dc)) {
7766         dc->base.pc_next = pc + 4;
7767         return;
7768     }
7769 
7770     dc->pc_curr = pc;
7771     insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b);
7772     dc->insn = insn;
7773     dc->base.pc_next = pc + 4;
7774     disas_arm_insn(dc, insn);
7775 
7776     arm_post_translate_insn(dc);
7777 
7778     /* ARM is a fixed-length ISA.  We performed the cross-page check
7779        in init_disas_context by adjusting max_insns.  */
7780 }
7781 
7782 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
7783 {
7784     /* Return true if this Thumb insn is always unconditional,
7785      * even inside an IT block. This is true of only a very few
7786      * instructions: BKPT, HLT, and SG.
7787      *
7788      * A larger class of instructions are UNPREDICTABLE if used
7789      * inside an IT block; we do not need to detect those here, because
7790      * what we do by default (perform the cc check and update the IT
7791      * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
7792      * choice for those situations.
7793      *
7794      * insn is either a 16-bit or a 32-bit instruction; the two are
7795      * distinguishable because for the 16-bit case the top 16 bits
7796      * are zeroes, and that isn't a valid 32-bit encoding.
7797      */
7798     if ((insn & 0xffffff00) == 0xbe00) {
7799         /* BKPT */
7800         return true;
7801     }
7802 
7803     if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
7804         !arm_dc_feature(s, ARM_FEATURE_M)) {
7805         /* HLT: v8A only. This is unconditional even when it is going to
7806          * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
7807          * For v7 cores this was a plain old undefined encoding and so
7808          * honours its cc check. (We might be using the encoding as
7809          * a semihosting trap, but we don't change the cc check behaviour
7810          * on that account, because a debugger connected to a real v7A
7811          * core and emulating semihosting traps by catching the UNDEF
7812          * exception would also only see cases where the cc check passed.
7813          * No guest code should be trying to do a HLT semihosting trap
7814          * in an IT block anyway.
7815          */
7816         return true;
7817     }
7818 
7819     if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
7820         arm_dc_feature(s, ARM_FEATURE_M)) {
7821         /* SG: v8M only */
7822         return true;
7823     }
7824 
7825     return false;
7826 }
7827 
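/*
 * Translate a single T32 insn. Compared with the A32 path this must also
 * determine the insn length (16 or 32 bits), apply IT-block conditionality,
 * and cope with M-profile ECI/ICI partial-execution state; see the comments
 * in the body for the details of each case.
 */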
7828 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7829 {
7830     DisasContext *dc = container_of(dcbase, DisasContext, base);
7831     CPUARMState *env = cpu_env(cpu);
7832     uint32_t pc = dc->base.pc_next;
7833     uint32_t insn;
7834     bool is_16bit;
7835     /* TCG op to rewind to if this turns out to be an invalid ECI state */
7836     TCGOp *insn_eci_rewind = NULL;
7837     target_ulong insn_eci_pc_save = -1;
7838 
7839     /* Misaligned thumb PC is architecturally impossible. */
7840     assert((dc->base.pc_next & 1) == 0);
7841 
7842     if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
7843         dc->base.pc_next = pc + 2;
7844         return;
7845     }
7846 
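    /*
     * Fetch the insn. A first halfword whose top five bits are 0b11101,
     * 0b11110 or 0b11111 begins a 32-bit encoding (thumb_insn_is_16bit()
     * also copes with Thumb-1 cores, which treat such BL/BLX prefixes as
     * pairs of 16-bit insns); in the 32-bit case we load the second
     * halfword too and pack the pair into 'insn' with the first halfword
     * in the high 16 bits.
     */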
7847     dc->pc_curr = pc;
7848     insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7849     is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
7850     pc += 2;
7851     if (!is_16bit) {
7852         uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7853         insn = insn << 16 | insn2;
7854         pc += 2;
7855     }
7856     dc->base.pc_next = pc;
7857     dc->insn = insn;
7858 
7859     if (dc->pstate_il) {
7860         /*
7861          * Illegal execution state. This has priority over BTI
7862          * exceptions, but comes after instruction abort exceptions.
7863          */
7864         gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
7865         return;
7866     }
7867 
7868     if (dc->eci) {
7869         /*
7870          * For M-profile continuable instructions, ECI/ICI handling
7871          * falls into these cases:
7872          *  - interrupt-continuable instructions
7873          *     These are the various load/store multiple insns (both
7874          *     integer and fp). The ICI bits indicate the register
7875          *     where the load/store can resume. We make the IMPDEF
7876          *     choice to always do "instruction restart", ie ignore
7877          *     the ICI value and always execute the ldm/stm from the
7878          *     start. So all we need to do is zero PSR.ICI if the
7879          *     insn executes.
7880          *  - MVE instructions subject to beat-wise execution
7881          *     Here the ECI bits indicate which beats have already been
7882          *     executed, and we must honour this. Each insn of this
7883          *     type will handle it correctly. We will update PSR.ECI
7884          *     in the helper function for the insn (some ECI values
7885          *     mean that the following insn also has been partially
7886          *     executed).
7887          *  - Special cases which don't advance ECI
7888          *     The insns LE, LETP and BKPT leave the ECI/ICI state
7889          *     bits untouched.
7890          *  - all other insns (the common case)
7891          *     Non-zero ECI/ICI means an INVSTATE UsageFault.
7892          *     We place a rewind-marker here. Insns in the previous
7893          *     three categories will set a flag in the DisasContext.
7894          *     If the flag isn't set after we call disas_thumb_insn()
7895          *     or disas_thumb2_insn() then we know we have a "some other
7896          *     insn" case. We will rewind to the marker (ie throwing away
7897          *     all the generated code) and instead emit "take exception".
7898          */
7899         insn_eci_rewind = tcg_last_op();
7900         insn_eci_pc_save = dc->pc_save;
7901     }
7902 
7903     if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
7904         uint32_t cond = dc->condexec_cond;
7905 
7906         /*
7907          * Conditionally skip the insn. Note that both 0xe and 0xf mean
7908          * "always"; 0xf is not "never". (A cond of 0xf can only arise
         * here from a CONSTRAINED UNPREDICTABLE IT encoding, e.g. a base
         * condition of AL combined with an "else" lane; executing such
         * insns unconditionally is a permitted choice.)
7909          */
7910         if (cond < 0x0e) {
7911             arm_skip_unless(dc, cond);
7912         }
7913     }
7914 
7915     if (is_16bit) {
7916         disas_thumb_insn(dc, insn);
7917     } else {
7918         disas_thumb2_insn(dc, insn);
7919     }
7920 
7921     /* Advance the Thumb condexec condition.  */
7922     if (dc->condexec_mask) {
7923         dc->condexec_cond = ((dc->condexec_cond & 0xe) |
7924                              ((dc->condexec_mask >> 4) & 1));
7925         dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7926         if (dc->condexec_mask == 0) {
7927             dc->condexec_cond = 0;
7928         }
7929     }
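    /*
     * Worked example (a sketch, for an "ITE EQ" block): when the first
     * insn inside the block is reached, condexec_cond is 0x0 (EQ) and
     * condexec_mask is 0x18. The advance above then yields cond 0x1 (NE)
     * and mask 0x10 for the second insn, and after that insn the mask
     * shifts out to 0, so the condition is cleared and the IT block ends.
     */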
7930 
7931     if (dc->eci && !dc->eci_handled) {
7932         /*
7933          * Insn wasn't valid for ECI/ICI at all: undo what we
7934          * just generated and instead emit an exception
7935          */
7936         tcg_remove_ops_after(insn_eci_rewind);
7937         dc->pc_save = insn_eci_pc_save;
7938         dc->condjmp = 0;
7939         gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
7940     }
7941 
7942     arm_post_translate_insn(dc);
7943 
7944     /* Thumb is a variable-length ISA.  Stop translation when the next insn
7945      * will touch a new page.  This ensures that prefetch aborts occur at
7946      * the right place.
7947      *
7948      * We want to stop the TB if the next insn starts in a new page,
7949      * or if it spans between this page and the next. This means that
7950      * if we're looking at the last halfword in the page we need to
7951      * see if it's a 16-bit Thumb insn (which will fit in this TB)
7952      * or a 32-bit Thumb insn (which won't).
7953      * This is to avoid generating a silly TB with a single 16-bit insn
7954      * in it at the end of this page (which would execute correctly
7955      * but isn't very efficient).
7956      */
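    /*
     * Concretely, assuming 4 KiB pages: TARGET_PAGE_SIZE - 3 is 0xffd, so
     * the second clause below only fires when the next insn would start at
     * page offset 0xffe (the last halfword of the page), and
     * insn_crosses_page() then checks whether that halfword begins a
     * 32-bit insn.
     */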
7957     if (dc->base.is_jmp == DISAS_NEXT
7958         && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
7959             || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
7960                 && insn_crosses_page(env, dc)))) {
7961         dc->base.is_jmp = DISAS_TOO_MANY;
7962     }
7963 }
7964 
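/*
 * Emit the code that ends the TB. Runs once per TB after the last insn has
 * been translated, and dispatches on base.is_jmp to choose between goto_tb
 * chaining, exiting to the main loop, or raising the pending exception or
 * single-step event; the dc->condjmp tail handles the "condition failed"
 * path of a final conditional branch or trap.
 */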
7965 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7966 {
7967     DisasContext *dc = container_of(dcbase, DisasContext, base);
7968 
7969     /* At this stage dc->condjmp will only be set when the skipped
7970        instruction was a conditional branch or trap, and the PC has
7971        already been written.  */
7972     gen_set_condexec(dc);
7973     if (dc->base.is_jmp == DISAS_BX_EXCRET) {
7974         /* Exception return branches need some special case code at the
7975          * end of the TB, which is complex enough that it has to
7976          * handle the single-step vs not and the condition-failed
7977          * insn codepath itself.
7978          */
7979         gen_bx_excret_final_code(dc);
7980     } else if (unlikely(dc->ss_active)) {
7981         /* Unconditional and "condition passed" instruction codepath. */
7982         switch (dc->base.is_jmp) {
7983         case DISAS_SWI:
7984             gen_ss_advance(dc);
7985             gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
7986             break;
7987         case DISAS_HVC:
7988             gen_ss_advance(dc);
7989             gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7990             break;
7991         case DISAS_SMC:
7992             gen_ss_advance(dc);
7993             gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
7994             break;
7995         case DISAS_NEXT:
7996         case DISAS_TOO_MANY:
7997         case DISAS_UPDATE_EXIT:
7998         case DISAS_UPDATE_NOCHAIN:
7999             gen_update_pc(dc, curr_insn_len(dc));
8000             /* fall through */
8001         default:
8002             /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8003             gen_singlestep_exception(dc);
8004             break;
8005         case DISAS_NORETURN:
8006             break;
8007         }
8008     } else {
8009         /* While branches must always occur at the end of an IT block,
8010            there are a few other things that can cause us to terminate
8011            the TB in the middle of an IT block:
8012             - Exception generating instructions (bkpt, swi, undefined).
8013             - Page boundaries.
8014             - Hardware watchpoints.
8015            Hardware breakpoints have already been handled and skip this code.
8016          */
8017         switch (dc->base.is_jmp) {
8018         case DISAS_NEXT:
8019         case DISAS_TOO_MANY:
8020             gen_goto_tb(dc, 1, curr_insn_len(dc));
8021             break;
8022         case DISAS_UPDATE_NOCHAIN:
8023             gen_update_pc(dc, curr_insn_len(dc));
8024             /* fall through */
8025         case DISAS_JUMP:
8026             gen_goto_ptr();
8027             break;
8028         case DISAS_UPDATE_EXIT:
8029             gen_update_pc(dc, curr_insn_len(dc));
8030             /* fall through */
8031         default:
8032             /* indicate that the hash table must be used to find the next TB */
8033             tcg_gen_exit_tb(NULL, 0);
8034             break;
8035         case DISAS_NORETURN:
8036             /* nothing more to generate */
8037             break;
8038         case DISAS_WFI:
8039             gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
8040             /*
8041              * The helper doesn't necessarily throw an exception, but we
8042              * must go back to the main loop to check for interrupts anyway.
8043              */
8044             tcg_gen_exit_tb(NULL, 0);
8045             break;
8046         case DISAS_WFE:
8047             gen_helper_wfe(tcg_env);
8048             break;
8049         case DISAS_YIELD:
8050             gen_helper_yield(tcg_env);
8051             break;
8052         case DISAS_SWI:
8053             gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
8054             break;
8055         case DISAS_HVC:
8056             gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8057             break;
8058         case DISAS_SMC:
8059             gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
8060             break;
8061         }
8062     }
8063 
8064     if (dc->condjmp) {
8065         /* "Condition failed" instruction codepath for the branch/trap insn */
8066         set_disas_label(dc, dc->condlabel);
8067         gen_set_condexec(dc);
8068         if (unlikely(dc->ss_active)) {
8069             gen_update_pc(dc, curr_insn_len(dc));
8070             gen_singlestep_exception(dc);
8071         } else {
8072             gen_goto_tb(dc, 1, curr_insn_len(dc));
8073         }
8074     }
8075 }
8076 
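/*
 * Hook tables consumed by translator_loop(); the A32 and T32 front ends
 * share every hook except the per-insn translate_insn callback.
 */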
8077 static const TranslatorOps arm_translator_ops = {
8078     .init_disas_context = arm_tr_init_disas_context,
8079     .tb_start           = arm_tr_tb_start,
8080     .insn_start         = arm_tr_insn_start,
8081     .translate_insn     = arm_tr_translate_insn,
8082     .tb_stop            = arm_tr_tb_stop,
8083 };
8084 
8085 static const TranslatorOps thumb_translator_ops = {
8086     .init_disas_context = arm_tr_init_disas_context,
8087     .tb_start           = arm_tr_tb_start,
8088     .insn_start         = arm_tr_insn_start,
8089     .translate_insn     = thumb_tr_translate_insn,
8090     .tb_stop            = arm_tr_tb_stop,
8091 };
8092 
8093 /* generate intermediate code for basic block 'tb'.  */
8094 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
8095                            vaddr pc, void *host_pc)
8096 {
8097     DisasContext dc = { };
8098     const TranslatorOps *ops = &arm_translator_ops;
8099     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);
8100 
8101     if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
8102         ops = &thumb_translator_ops;
8103     }
8104 #ifdef TARGET_AARCH64
8105     if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
8106         ops = &aarch64_translator_ops;
8107     }
8108 #endif
8109 
8110     translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
8111 }
8112