1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "qemu/osdep.h"
22
23 #include "translate.h"
24 #include "translate-a32.h"
25 #include "qemu/log.h"
26 #include "arm_ldst.h"
27 #include "semihosting/semihost.h"
28 #include "cpregs.h"
29 #include "exec/helper-proto.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
36 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
37 /* currently all emulated v5 cores are also v5TE, so don't bother */
38 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
39 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
40 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
41 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
42 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
43 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
44 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
45
46 /* These are TCG temporaries used only by the legacy iwMMXt decoder */
47 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
48 /* These are TCG globals which alias CPUARMState fields */
49 static TCGv_i32 cpu_R[16];
50 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
51 TCGv_i64 cpu_exclusive_addr;
52 TCGv_i64 cpu_exclusive_val;
53
54 static const char * const regnames[] =
55 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
56 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
57
58
59 /* initialize TCG globals. */
60 void arm_translate_init(void)
61 {
62 int i;
63
64 for (i = 0; i < 16; i++) {
65 cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
66 offsetof(CPUARMState, regs[i]),
67 regnames[i]);
68 }
69 cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
70 cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
71 cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
72 cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");
73
74 cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
75 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
76 cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
77 offsetof(CPUARMState, exclusive_val), "exclusive_val");
78
79 a64_translate_init();
80 }
81
82 uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
83 {
84 /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
85 switch (cmode) {
86 case 0: case 1:
87 /* no-op */
88 break;
89 case 2: case 3:
90 imm <<= 8;
91 break;
92 case 4: case 5:
93 imm <<= 16;
94 break;
95 case 6: case 7:
96 imm <<= 24;
97 break;
98 case 8: case 9:
99 imm |= imm << 16;
100 break;
101 case 10: case 11:
102 imm = (imm << 8) | (imm << 24);
103 break;
104 case 12:
105 imm = (imm << 8) | 0xff;
106 break;
107 case 13:
108 imm = (imm << 16) | 0xffff;
109 break;
110 case 14:
111 if (op) {
112 /*
113 * This and cmode == 15 op == 1 are the only cases where
114 * the top and bottom 32 bits of the encoded constant differ.
115 */
116 uint64_t imm64 = 0;
117 int n;
118
119 for (n = 0; n < 8; n++) {
120 if (imm & (1 << n)) {
121 imm64 |= (0xffULL << (n * 8));
122 }
123 }
124 return imm64;
125 }
126 imm |= (imm << 8) | (imm << 16) | (imm << 24);
127 break;
128 case 15:
129 if (op) {
130 /* Reserved encoding for AArch32; valid for AArch64 */
131 uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
132 if (imm & 0x80) {
133 imm64 |= 0x8000000000000000ULL;
134 }
135 if (imm & 0x40) {
136 imm64 |= 0x3fc0000000000000ULL;
137 } else {
138 imm64 |= 0x4000000000000000ULL;
139 }
140 return imm64;
141 }
142 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
143 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
144 break;
145 }
146 if (op) {
147 imm = ~imm;
148 }
149 return dup_const(MO_32, imm);
150 }
151
152 /* Generate a label used for skipping this instruction */
153 void arm_gen_condlabel(DisasContext *s)
154 {
155 if (!s->condjmp) {
156 s->condlabel = gen_disas_label(s);
157 s->condjmp = 1;
158 }
159 }
160
161 /* Flags for the disas_set_da_iss info argument:
162 * lower bits hold the Rt register number, higher bits are flags.
163 */
164 typedef enum ISSInfo {
165 ISSNone = 0,
166 ISSRegMask = 0x1f,
167 ISSInvalid = (1 << 5),
168 ISSIsAcqRel = (1 << 6),
169 ISSIsWrite = (1 << 7),
170 ISSIs16Bit = (1 << 8),
171 } ISSInfo;
172
173 /*
174 * Store var into env + offset to a member with size bytes.
175 * Free var after use.
176 */
177 void store_cpu_offset(TCGv_i32 var, int offset, int size)
178 {
179 switch (size) {
180 case 1:
181 tcg_gen_st8_i32(var, tcg_env, offset);
182 break;
183 case 4:
184 tcg_gen_st_i32(var, tcg_env, offset);
185 break;
186 default:
187 g_assert_not_reached();
188 }
189 }
190
191 /* Save the syndrome information for a Data Abort */
192 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
193 {
194 uint32_t syn;
195 int sas = memop & MO_SIZE;
196 bool sse = memop & MO_SIGN;
197 bool is_acqrel = issinfo & ISSIsAcqRel;
198 bool is_write = issinfo & ISSIsWrite;
199 bool is_16bit = issinfo & ISSIs16Bit;
200 int srt = issinfo & ISSRegMask;
201
202 if (issinfo & ISSInvalid) {
203 /* Some callsites want to conditionally provide ISS info,
204 * eg "only if this was not a writeback"
205 */
206 return;
207 }
208
209 if (srt == 15) {
210 /* For AArch32, insns where the src/dest is R15 never generate
211 * ISS information. Catching that here saves checking at all
212 * the call sites.
213 */
214 return;
215 }
216
217 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
218 0, 0, 0, is_write, 0, is_16bit);
219 disas_set_insn_syndrome(s, syn);
220 }
221
222 static inline int get_a32_user_mem_index(DisasContext *s)
223 {
224 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
225 * insns:
226 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
227 * otherwise, access as if at PL0.
228 */
229 switch (s->mmu_idx) {
230 case ARMMMUIdx_E3:
231 case ARMMMUIdx_E30_0:
232 case ARMMMUIdx_E30_3_PAN:
233 return arm_to_core_mmu_idx(ARMMMUIdx_E30_0);
234 case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
235 case ARMMMUIdx_E10_0:
236 case ARMMMUIdx_E10_1:
237 case ARMMMUIdx_E10_1_PAN:
238 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
239 case ARMMMUIdx_MUser:
240 case ARMMMUIdx_MPriv:
241 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
242 case ARMMMUIdx_MUserNegPri:
243 case ARMMMUIdx_MPrivNegPri:
244 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
245 case ARMMMUIdx_MSUser:
246 case ARMMMUIdx_MSPriv:
247 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
248 case ARMMMUIdx_MSUserNegPri:
249 case ARMMMUIdx_MSPrivNegPri:
250 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
251 default:
252 g_assert_not_reached();
253 }
254 }
255
256 /* The pc_curr difference for an architectural jump. */
257 static target_long jmp_diff(DisasContext *s, target_long diff)
258 {
259 return diff + (s->thumb ? 4 : 8);
260 }
261
262 static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
263 {
264 assert(s->pc_save != -1);
265 if (tb_cflags(s->base.tb) & CF_PCREL) {
266 tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
267 } else {
268 tcg_gen_movi_i32(var, s->pc_curr + diff);
269 }
270 }
271
272 /* Set a variable to the value of a CPU register. */
273 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
274 {
275 if (reg == 15) {
276 gen_pc_plus_diff(s, var, jmp_diff(s, 0));
277 } else {
278 tcg_gen_mov_i32(var, cpu_R[reg]);
279 }
280 }
281
282 /*
283 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
284 * This is used for load/store for which use of PC implies (literal),
285 * or ADD that implies ADR.
286 */
287 TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
288 {
289 TCGv_i32 tmp = tcg_temp_new_i32();
290
291 if (reg == 15) {
292 /*
293 * This address is computed from an aligned PC:
294 * subtract off the low bits.
295 */
296 gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
297 } else {
298 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
299 }
300 return tmp;
301 }
302
303 /* Set a CPU register. The source must be a temporary and will be
304 marked as dead. */
305 void store_reg(DisasContext *s, int reg, TCGv_i32 var)
306 {
307 if (reg == 15) {
308 /* In Thumb mode, we must ignore bit 0.
309 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
310 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
311 * We choose to ignore [1:0] in ARM mode for all architecture versions.
312 */
313 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
314 s->base.is_jmp = DISAS_JUMP;
315 s->pc_save = -1;
316 } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
317 /* For M-profile SP bits [1:0] are always zero */
318 tcg_gen_andi_i32(var, var, ~3);
319 }
320 tcg_gen_mov_i32(cpu_R[reg], var);
321 }
322
323 /*
324 * Variant of store_reg which applies v8M stack-limit checks before updating
325 * SP. If the check fails this will result in an exception being taken.
326 * We disable the stack checks for CONFIG_USER_ONLY because we have
327 * no idea what the stack limits should be in that case.
328 * If stack checking is not being done this just acts like store_reg().
329 */
330 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
331 {
332 #ifndef CONFIG_USER_ONLY
333 if (s->v8m_stackcheck) {
334 gen_helper_v8m_stackcheck(tcg_env, var);
335 }
336 #endif
337 store_reg(s, 13, var);
338 }
339
340 /* Value extensions. */
341 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
342 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
343 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
344 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
345
346 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
347 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
348
349 void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
350 {
351 gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
352 }
353
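/*
 * Regenerate the cached hflags after a change to CPU state.
 * new_el is true when the change may also have switched the exception
 * level, in which case the "newel" helper variant recomputes it from env.
 */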
354 static void gen_rebuild_hflags(DisasContext *s, bool new_el)
355 {
356 bool m_profile = arm_dc_feature(s, ARM_FEATURE_M);
357
358 if (new_el) {
359 if (m_profile) {
360 gen_helper_rebuild_hflags_m32_newel(tcg_env);
361 } else {
362 gen_helper_rebuild_hflags_a32_newel(tcg_env);
363 }
364 } else {
365 TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
366 if (m_profile) {
367 gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
368 } else {
369 gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
370 }
371 }
372 }
373
374 static void gen_exception_internal(int excp)
375 {
376 assert(excp_is_internal(excp));
377 gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
378 }
379
380 static void gen_singlestep_exception(DisasContext *s)
381 {
382 /* We just completed step of an insn. Move from Active-not-pending
383 * to Active-pending, and then also take the swstep exception.
384 * This corresponds to making the (IMPDEF) choice to prioritize
385 * swstep exceptions over asynchronous exceptions taken to an exception
386 * level where debug is disabled. This choice has the advantage that
387 * we do not need to maintain internal state corresponding to the
388 * ISV/EX syndrome bits between completion of the step and generation
389 * of the exception, and our syndrome information is always correct.
390 */
391 gen_ss_advance(s);
392 gen_swstep_exception(s, 1, s->is_ldex);
393 s->base.is_jmp = DISAS_NORETURN;
394 }
395
396 void clear_eci_state(DisasContext *s)
397 {
398 /*
399 * Clear any ECI/ICI state: used when a load multiple/store
400 * multiple insn executes.
401 */
402 if (s->eci) {
403 store_cpu_field_constant(0, condexec_bits);
404 s->eci = 0;
405 }
406 }
407
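/*
 * Dual signed 16x16->32 multiply: on return, a holds the product of
 * the low halfwords and b holds the product of the high halfwords.
 */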
408 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
409 {
410 TCGv_i32 tmp1 = tcg_temp_new_i32();
411 TCGv_i32 tmp2 = tcg_temp_new_i32();
412 tcg_gen_ext16s_i32(tmp1, a);
413 tcg_gen_ext16s_i32(tmp2, b);
414 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
415 tcg_gen_sari_i32(a, a, 16);
416 tcg_gen_sari_i32(b, b, 16);
417 tcg_gen_mul_i32(b, b, a);
418 tcg_gen_mov_i32(a, tmp1);
419 }
420
421 /* Byteswap each halfword. */
422 void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
423 {
424 TCGv_i32 tmp = tcg_temp_new_i32();
425 TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
426 tcg_gen_shri_i32(tmp, var, 8);
427 tcg_gen_and_i32(tmp, tmp, mask);
428 tcg_gen_and_i32(var, var, mask);
429 tcg_gen_shli_i32(var, var, 8);
430 tcg_gen_or_i32(dest, var, tmp);
431 }
432
433 /* Byteswap low halfword and sign extend. */
434 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
435 {
436 tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
437 }
438
439 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
440 tmp = (t0 ^ t1) & 0x8000;
441 t0 &= ~0x8000;
442 t1 &= ~0x8000;
443 t0 = (t0 + t1) ^ tmp;
444 */
445
446 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
447 {
448 TCGv_i32 tmp = tcg_temp_new_i32();
449 tcg_gen_xor_i32(tmp, t0, t1);
450 tcg_gen_andi_i32(tmp, tmp, 0x8000);
451 tcg_gen_andi_i32(t0, t0, ~0x8000);
452 tcg_gen_andi_i32(t1, t1, ~0x8000);
453 tcg_gen_add_i32(t0, t0, t1);
454 tcg_gen_xor_i32(dest, t0, tmp);
455 }
456
457 /* Set N and Z flags from var. */
458 static inline void gen_logic_CC(TCGv_i32 var)
459 {
460 tcg_gen_mov_i32(cpu_NF, var);
461 tcg_gen_mov_i32(cpu_ZF, var);
462 }
463
464 /* dest = T0 + T1 + CF. */
465 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
466 {
467 tcg_gen_add_i32(dest, t0, t1);
468 tcg_gen_add_i32(dest, dest, cpu_CF);
469 }
470
471 /* dest = T0 - T1 + CF - 1. */
472 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
473 {
474 tcg_gen_sub_i32(dest, t0, t1);
475 tcg_gen_add_i32(dest, dest, cpu_CF);
476 tcg_gen_subi_i32(dest, dest, 1);
477 }
478
479 /* dest = T0 + T1. Compute C, N, V and Z flags */
480 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
481 {
482 TCGv_i32 tmp = tcg_temp_new_i32();
483 tcg_gen_movi_i32(tmp, 0);
484 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
485 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
486 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
487 tcg_gen_xor_i32(tmp, t0, t1);
488 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
489 tcg_gen_mov_i32(dest, cpu_NF);
490 }
491
492 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
493 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
494 {
495 TCGv_i32 tmp = tcg_temp_new_i32();
496 if (TCG_TARGET_HAS_add2_i32) {
497 tcg_gen_movi_i32(tmp, 0);
498 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
499 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
500 } else {
501 TCGv_i64 q0 = tcg_temp_new_i64();
502 TCGv_i64 q1 = tcg_temp_new_i64();
503 tcg_gen_extu_i32_i64(q0, t0);
504 tcg_gen_extu_i32_i64(q1, t1);
505 tcg_gen_add_i64(q0, q0, q1);
506 tcg_gen_extu_i32_i64(q1, cpu_CF);
507 tcg_gen_add_i64(q0, q0, q1);
508 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
509 }
510 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
511 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
512 tcg_gen_xor_i32(tmp, t0, t1);
513 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
515 }
516
517 /* dest = T0 - T1. Compute C, N, V and Z flags */
518 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
519 {
520 TCGv_i32 tmp;
521 tcg_gen_sub_i32(cpu_NF, t0, t1);
522 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
523 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
524 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
525 tmp = tcg_temp_new_i32();
526 tcg_gen_xor_i32(tmp, t0, t1);
527 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
528 tcg_gen_mov_i32(dest, cpu_NF);
529 }
530
531 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
532 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
533 {
534 TCGv_i32 tmp = tcg_temp_new_i32();
535 tcg_gen_not_i32(tmp, t1);
536 gen_adc_CC(dest, t0, tmp);
537 }
538
539 #define GEN_SHIFT(name) \
540 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
541 { \
542 TCGv_i32 tmpd = tcg_temp_new_i32(); \
543 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
544 TCGv_i32 zero = tcg_constant_i32(0); \
545 tcg_gen_andi_i32(tmp1, t1, 0x1f); \
546 tcg_gen_##name##_i32(tmpd, t0, tmp1); \
547 tcg_gen_andi_i32(tmp1, t1, 0xe0); \
548 tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \
549 }
550 GEN_SHIFT(shl)
551 GEN_SHIFT(shr)
552 #undef GEN_SHIFT
553
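/* Variable arithmetic shift right; shift counts of 32 or more are clamped to 31. */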
554 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
555 {
556 TCGv_i32 tmp1 = tcg_temp_new_i32();
557
558 tcg_gen_andi_i32(tmp1, t1, 0xff);
559 tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31));
560 tcg_gen_sar_i32(dest, t0, tmp1);
561 }
562
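/* Set the carry flag from bit SHIFT of VAR (the last bit shifted out). */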
563 static void shifter_out_im(TCGv_i32 var, int shift)
564 {
565 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
566 }
567
568 /* Shift by immediate. Includes special handling for shift == 0. */
569 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
570 int shift, int flags)
571 {
572 switch (shiftop) {
573 case 0: /* LSL */
574 if (shift != 0) {
575 if (flags)
576 shifter_out_im(var, 32 - shift);
577 tcg_gen_shli_i32(var, var, shift);
578 }
579 break;
580 case 1: /* LSR */
581 if (shift == 0) {
582 if (flags) {
583 tcg_gen_shri_i32(cpu_CF, var, 31);
584 }
585 tcg_gen_movi_i32(var, 0);
586 } else {
587 if (flags)
588 shifter_out_im(var, shift - 1);
589 tcg_gen_shri_i32(var, var, shift);
590 }
591 break;
592 case 2: /* ASR */
593 if (shift == 0)
594 shift = 32;
595 if (flags)
596 shifter_out_im(var, shift - 1);
597 if (shift == 32)
598 shift = 31;
599 tcg_gen_sari_i32(var, var, shift);
600 break;
601 case 3: /* ROR/RRX */
602 if (shift != 0) {
603 if (flags)
604 shifter_out_im(var, shift - 1);
605 tcg_gen_rotri_i32(var, var, shift); break;
606 } else {
607 TCGv_i32 tmp = tcg_temp_new_i32();
608 tcg_gen_shli_i32(tmp, cpu_CF, 31);
609 if (flags)
610 shifter_out_im(var, 0);
611 tcg_gen_shri_i32(var, var, 1);
612 tcg_gen_or_i32(var, var, tmp);
613 }
614 }
615 };
616
617 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
618 TCGv_i32 shift, int flags)
619 {
620 if (flags) {
621 switch (shiftop) {
622 case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
623 case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
624 case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
625 case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
626 }
627 } else {
628 switch (shiftop) {
629 case 0:
630 gen_shl(var, var, shift);
631 break;
632 case 1:
633 gen_shr(var, var, shift);
634 break;
635 case 2:
636 gen_sar(var, var, shift);
637 break;
638 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
639 tcg_gen_rotr_i32(var, var, shift); break;
640 }
641 }
642 }
643
644 /*
645 * Generate a conditional based on ARM condition code cc.
646 * This is common between ARM and Aarch64 targets.
647 */
648 void arm_test_cc(DisasCompare *cmp, int cc)
649 {
650 TCGv_i32 value;
651 TCGCond cond;
652
653 switch (cc) {
654 case 0: /* eq: Z */
655 case 1: /* ne: !Z */
656 cond = TCG_COND_EQ;
657 value = cpu_ZF;
658 break;
659
660 case 2: /* cs: C */
661 case 3: /* cc: !C */
662 cond = TCG_COND_NE;
663 value = cpu_CF;
664 break;
665
666 case 4: /* mi: N */
667 case 5: /* pl: !N */
668 cond = TCG_COND_LT;
669 value = cpu_NF;
670 break;
671
672 case 6: /* vs: V */
673 case 7: /* vc: !V */
674 cond = TCG_COND_LT;
675 value = cpu_VF;
676 break;
677
678 case 8: /* hi: C && !Z */
679 case 9: /* ls: !C || Z -> !(C && !Z) */
680 cond = TCG_COND_NE;
681 value = tcg_temp_new_i32();
682 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
683 ZF is non-zero for !Z; so AND the two subexpressions. */
684 tcg_gen_neg_i32(value, cpu_CF);
685 tcg_gen_and_i32(value, value, cpu_ZF);
686 break;
687
688 case 10: /* ge: N == V -> N ^ V == 0 */
689 case 11: /* lt: N != V -> N ^ V != 0 */
690 /* Since we're only interested in the sign bit, == 0 is >= 0. */
691 cond = TCG_COND_GE;
692 value = tcg_temp_new_i32();
693 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
694 break;
695
696 case 12: /* gt: !Z && N == V */
697 case 13: /* le: Z || N != V */
698 cond = TCG_COND_NE;
699 value = tcg_temp_new_i32();
700 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
701 * the sign bit then AND with ZF to yield the result. */
702 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
703 tcg_gen_sari_i32(value, value, 31);
704 tcg_gen_andc_i32(value, cpu_ZF, value);
705 break;
706
707 case 14: /* always */
708 case 15: /* always */
709 /* Use the ALWAYS condition, which will fold early.
710 * It doesn't matter what we use for the value. */
711 cond = TCG_COND_ALWAYS;
712 value = cpu_ZF;
713 goto no_invert;
714
715 default:
716 fprintf(stderr, "Bad condition code 0x%x\n", cc);
717 abort();
718 }
719
720 if (cc & 1) {
721 cond = tcg_invert_cond(cond);
722 }
723
724 no_invert:
725 cmp->cond = cond;
726 cmp->value = value;
727 }
728
729 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
730 {
731 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
732 }
733
734 void arm_gen_test_cc(int cc, TCGLabel *label)
735 {
736 DisasCompare cmp;
737 arm_test_cc(&cmp, cc);
738 arm_jump_cc(&cmp, label);
739 }
740
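/* Write the current IT-block state back to the condexec_bits CPU field. */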
741 void gen_set_condexec(DisasContext *s)
742 {
743 if (s->condexec_mask) {
744 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
745
746 store_cpu_field_constant(val, condexec_bits);
747 }
748 }
749
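/* Set the PC to pc_curr + diff and record the new statically-known PC value. */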
750 void gen_update_pc(DisasContext *s, target_long diff)
751 {
752 gen_pc_plus_diff(s, cpu_R[15], diff);
753 s->pc_save = s->pc_curr + diff;
754 }
755
756 /* Set PC and Thumb state from var. var is marked as dead. */
757 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
758 {
759 s->base.is_jmp = DISAS_JUMP;
760 tcg_gen_andi_i32(cpu_R[15], var, ~1);
761 tcg_gen_andi_i32(var, var, 1);
762 store_cpu_field(var, thumb);
763 s->pc_save = -1;
764 }
765
766 /*
767 * Set PC and Thumb state from var. var is marked as dead.
768 * For M-profile CPUs, include logic to detect exception-return
769 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
770 * and BX reg, and no others, and happens only for code in Handler mode.
771 * The Security Extension also requires us to check for the FNC_RETURN
772 * which signals a function return from non-secure state; this can happen
773 * in both Handler and Thread mode.
774 * To avoid having to do multiple comparisons in inline generated code,
775 * we make the check we do here loose, so it will match for EXC_RETURN
776 * in Thread mode. For system emulation do_v7m_exception_exit() checks
777 * for these spurious cases and returns without doing anything (giving
778 * the same behaviour as for a branch to a non-magic address).
779 *
780 * In linux-user mode it is unclear what the right behaviour for an
781 * attempted FNC_RETURN should be, because in real hardware this will go
782 * directly to Secure code (ie not the Linux kernel) which will then treat
783 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
784 * attempt behave the way it would on a CPU without the security extension,
785 * which is to say "like a normal branch". That means we can simply treat
786 * all branches as normal with no magic address behaviour.
787 */
788 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
789 {
790 /* Generate the same code here as for a simple bx, but flag via
791 * s->base.is_jmp that we need to do the rest of the work later.
792 */
793 gen_bx(s, var);
794 #ifndef CONFIG_USER_ONLY
795 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
796 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
797 s->base.is_jmp = DISAS_BX_EXCRET;
798 }
799 #endif
800 }
801
802 static inline void gen_bx_excret_final_code(DisasContext *s)
803 {
804 /* Generate the code to finish possible exception return and end the TB */
805 DisasLabel excret_label = gen_disas_label(s);
806 uint32_t min_magic;
807
808 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
809 /* Covers FNC_RETURN and EXC_RETURN magic */
810 min_magic = FNC_RETURN_MIN_MAGIC;
811 } else {
812 /* EXC_RETURN magic only */
813 min_magic = EXC_RETURN_MIN_MAGIC;
814 }
815
816 /* Is the new PC value in the magic range indicating exception return? */
817 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
818 /* No: end the TB as we would for a DISAS_JMP */
819 if (s->ss_active) {
820 gen_singlestep_exception(s);
821 } else {
822 tcg_gen_exit_tb(NULL, 0);
823 }
824 set_disas_label(s, excret_label);
825 /* Yes: this is an exception return.
826 * At this point in runtime env->regs[15] and env->thumb will hold
827 * the exception-return magic number, which do_v7m_exception_exit()
828 * will read. Nothing else will be able to see those values because
829 * the cpu-exec main loop guarantees that we will always go straight
830 * from raising the exception to the exception-handling code.
831 *
832 * gen_ss_advance(s) does nothing on M profile currently but
833 * calling it is conceptually the right thing as we have executed
834 * this instruction (compare SWI, HVC, SMC handling).
835 */
836 gen_ss_advance(s);
837 gen_exception_internal(EXCP_EXCEPTION_EXIT);
838 }
839
840 static inline void gen_bxns(DisasContext *s, int rm)
841 {
842 TCGv_i32 var = load_reg(s, rm);
843
844 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
845 * we need to sync state before calling it, but:
846 * - we don't need to do gen_update_pc() because the bxns helper will
847 * always set the PC itself
848 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
849 * unless it's outside an IT block or the last insn in an IT block,
850 * so we know that condexec == 0 (already set at the top of the TB)
851 * is correct in the non-UNPREDICTABLE cases, and we can choose
852 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
853 */
854 gen_helper_v7m_bxns(tcg_env, var);
855 s->base.is_jmp = DISAS_EXIT;
856 }
857
858 static inline void gen_blxns(DisasContext *s, int rm)
859 {
860 TCGv_i32 var = load_reg(s, rm);
861
862 /* We don't need to sync condexec state, for the same reason as bxns.
863 * We do however need to set the PC, because the blxns helper reads it.
864 * The blxns helper may throw an exception.
865 */
866 gen_update_pc(s, curr_insn_len(s));
867 gen_helper_v7m_blxns(tcg_env, var);
868 s->base.is_jmp = DISAS_EXIT;
869 }
870
871 /* Variant of store_reg which uses branch&exchange logic when storing
872 to r15 in ARM architecture v7 and above. The source must be a temporary
873 and will be marked as dead. */
874 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
875 {
876 if (reg == 15 && ENABLE_ARCH_7) {
877 gen_bx(s, var);
878 } else {
879 store_reg(s, reg, var);
880 }
881 }
882
883 /* Variant of store_reg which uses branch&exchange logic when storing
884 * to r15 in ARM architecture v5T and above. This is used for storing
885 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
886 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
887 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
888 {
889 if (reg == 15 && ENABLE_ARCH_5) {
890 gen_bx_excret(s, var);
891 } else {
892 store_reg(s, reg, var);
893 }
894 }
895
896 #ifdef CONFIG_USER_ONLY
897 #define IS_USER_ONLY 1
898 #else
899 #define IS_USER_ONLY 0
900 #endif
901
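/* Return the MemOp alignment flag for an alignment of 2**i bytes. */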
902 MemOp pow2_align(unsigned i)
903 {
904 static const MemOp mop_align[] = {
905 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32
906 };
907 g_assert(i < ARRAY_SIZE(mop_align));
908 return mop_align[i];
909 }
910
911 /*
912 * Abstractions of "generate code to do a guest load/store for
913 * AArch32", where a vaddr is always 32 bits (and is zero
914 * extended if we're a 64 bit core) and data is also
915 * 32 bits unless specifically doing a 64 bit access.
916 * These functions work like tcg_gen_qemu_{ld,st}* except
917 * that the address argument is TCGv_i32 rather than TCGv.
918 */
919
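/*
 * Compute the full-width target address for an AArch32 access, applying
 * the BE32 (SCTLR.B) address XOR for sub-word accesses in system mode.
 */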
920 static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
921 {
922 TCGv addr = tcg_temp_new();
923 tcg_gen_extu_i32_tl(addr, a32);
924
925 /* Not needed for user-mode BE32, where we use MO_BE instead. */
926 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
927 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
928 }
929 return addr;
930 }
931
932 /*
933 * Internal routines are used for NEON cases where the endianness
934 * and/or alignment has already been taken into account and manipulated.
935 */
936 void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
937 TCGv_i32 a32, int index, MemOp opc)
938 {
939 TCGv addr = gen_aa32_addr(s, a32, opc);
940 tcg_gen_qemu_ld_i32(val, addr, index, opc);
941 }
942
943 void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
944 TCGv_i32 a32, int index, MemOp opc)
945 {
946 TCGv addr = gen_aa32_addr(s, a32, opc);
947 tcg_gen_qemu_st_i32(val, addr, index, opc);
948 }
949
950 void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
951 TCGv_i32 a32, int index, MemOp opc)
952 {
953 TCGv addr = gen_aa32_addr(s, a32, opc);
954
955 tcg_gen_qemu_ld_i64(val, addr, index, opc);
956
957 /* Not needed for user-mode BE32, where we use MO_BE instead. */
958 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
959 tcg_gen_rotri_i64(val, val, 32);
960 }
961 }
962
963 void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
964 TCGv_i32 a32, int index, MemOp opc)
965 {
966 TCGv addr = gen_aa32_addr(s, a32, opc);
967
968 /* Not needed for user-mode BE32, where we use MO_BE instead. */
969 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
970 TCGv_i64 tmp = tcg_temp_new_i64();
971 tcg_gen_rotri_i64(tmp, val, 32);
972 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
973 } else {
974 tcg_gen_qemu_st_i64(val, addr, index, opc);
975 }
976 }
977
978 void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
979 int index, MemOp opc)
980 {
981 gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
982 }
983
984 void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
985 int index, MemOp opc)
986 {
987 gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
988 }
989
990 void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
991 int index, MemOp opc)
992 {
993 gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
994 }
995
996 void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
997 int index, MemOp opc)
998 {
999 gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
1000 }
1001
1002 #define DO_GEN_LD(SUFF, OPC) \
1003 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1004 TCGv_i32 a32, int index) \
1005 { \
1006 gen_aa32_ld_i32(s, val, a32, index, OPC); \
1007 }
1008
1009 #define DO_GEN_ST(SUFF, OPC) \
1010 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1011 TCGv_i32 a32, int index) \
1012 { \
1013 gen_aa32_st_i32(s, val, a32, index, OPC); \
1014 }
1015
1016 static inline void gen_hvc(DisasContext *s, int imm16)
1017 {
1018 /* The pre HVC helper handles cases when HVC gets trapped
1019 * as an undefined insn by runtime configuration (ie before
1020 * the insn really executes).
1021 */
1022 gen_update_pc(s, 0);
1023 gen_helper_pre_hvc(tcg_env);
1024 /* Otherwise we will treat this as a real exception which
1025 * happens after execution of the insn. (The distinction matters
1026 * for the PC value reported to the exception handler and also
1027 * for single stepping.)
1028 */
1029 s->svc_imm = imm16;
1030 gen_update_pc(s, curr_insn_len(s));
1031 s->base.is_jmp = DISAS_HVC;
1032 }
1033
1034 static inline void gen_smc(DisasContext *s)
1035 {
1036 /* As with HVC, we may take an exception either before or after
1037 * the insn executes.
1038 */
1039 gen_update_pc(s, 0);
1040 gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
1041 gen_update_pc(s, curr_insn_len(s));
1042 s->base.is_jmp = DISAS_SMC;
1043 }
1044
1045 static void gen_exception_internal_insn(DisasContext *s, int excp)
1046 {
1047 gen_set_condexec(s);
1048 gen_update_pc(s, 0);
1049 gen_exception_internal(excp);
1050 s->base.is_jmp = DISAS_NORETURN;
1051 }
1052
1053 static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
1054 {
1055 gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
1056 tcg_constant_i32(syndrome), tcg_el);
1057 }
1058
1059 static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el)
1060 {
1061 gen_exception_el_v(excp, syndrome, tcg_constant_i32(target_el));
1062 }
1063
1064 static void gen_exception(int excp, uint32_t syndrome)
1065 {
1066 gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
1067 tcg_constant_i32(syndrome));
1068 }
1069
1070 static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
1071 int excp, uint32_t syn, TCGv_i32 tcg_el)
1072 {
1073 if (s->aarch64) {
1074 gen_a64_update_pc(s, pc_diff);
1075 } else {
1076 gen_set_condexec(s);
1077 gen_update_pc(s, pc_diff);
1078 }
1079 gen_exception_el_v(excp, syn, tcg_el);
1080 s->base.is_jmp = DISAS_NORETURN;
1081 }
1082
1083 void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
1084 uint32_t syn, uint32_t target_el)
1085 {
1086 gen_exception_insn_el_v(s, pc_diff, excp, syn,
1087 tcg_constant_i32(target_el));
1088 }
1089
1090 void gen_exception_insn(DisasContext *s, target_long pc_diff,
1091 int excp, uint32_t syn)
1092 {
1093 if (s->aarch64) {
1094 gen_a64_update_pc(s, pc_diff);
1095 } else {
1096 gen_set_condexec(s);
1097 gen_update_pc(s, pc_diff);
1098 }
1099 gen_exception(excp, syn);
1100 s->base.is_jmp = DISAS_NORETURN;
1101 }
1102
1103 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1104 {
1105 gen_set_condexec(s);
1106 gen_update_pc(s, 0);
1107 gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
1108 s->base.is_jmp = DISAS_NORETURN;
1109 }
1110
1111 void unallocated_encoding(DisasContext *s)
1112 {
1113 /* Unallocated and reserved encodings are uncategorized */
1114 gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
1115 }
1116
1117 /* Force a TB lookup after an instruction that changes the CPU state. */
1118 void gen_lookup_tb(DisasContext *s)
1119 {
1120 gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
1121 s->base.is_jmp = DISAS_EXIT;
1122 }
1123
1124 static inline void gen_hlt(DisasContext *s, int imm)
1125 {
1126 /* HLT. This has two purposes.
1127 * Architecturally, it is an external halting debug instruction.
1128 * Since QEMU doesn't implement external debug, we treat this as
1129 * it is required for halting debug disabled: it will UNDEF.
1130 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1131 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1132 * must trigger semihosting even for ARMv7 and earlier, where
1133 * HLT was an undefined encoding.
1134 * In system mode, we don't allow userspace access to
1135 * semihosting, to provide some semblance of security
1136 * (and for consistency with our 32-bit semihosting).
1137 */
1138 if (semihosting_enabled(s->current_el == 0) &&
1139 (imm == (s->thumb ? 0x3c : 0xf000))) {
1140 gen_exception_internal_insn(s, EXCP_SEMIHOST);
1141 return;
1142 }
1143
1144 unallocated_encoding(s);
1145 }
1146
1147 /*
1148 * Return the offset of a "full" NEON Dreg.
1149 */
1150 long neon_full_reg_offset(unsigned reg)
1151 {
1152 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1153 }
1154
1155 /*
1156 * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1157 * where 0 is the least significant end of the register.
1158 */
1159 long neon_element_offset(int reg, int element, MemOp memop)
1160 {
1161 int element_size = 1 << (memop & MO_SIZE);
1162 int ofs = element * element_size;
1163 #if HOST_BIG_ENDIAN
1164 /*
1165 * Calculate the offset assuming fully little-endian,
1166 * then XOR to account for the order of the 8-byte units.
1167 */
1168 if (element_size < 8) {
1169 ofs ^= 8 - element_size;
1170 }
1171 #endif
1172 return neon_full_reg_offset(reg) + ofs;
1173 }
1174
1175 /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
1176 long vfp_reg_offset(bool dp, unsigned reg)
1177 {
1178 if (dp) {
1179 return neon_element_offset(reg, 0, MO_64);
1180 } else {
1181 return neon_element_offset(reg >> 1, reg & 1, MO_32);
1182 }
1183 }
1184
1185 void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
1186 {
1187 long off = neon_element_offset(reg, ele, memop);
1188
1189 switch (memop) {
1190 case MO_SB:
1191 tcg_gen_ld8s_i32(dest, tcg_env, off);
1192 break;
1193 case MO_UB:
1194 tcg_gen_ld8u_i32(dest, tcg_env, off);
1195 break;
1196 case MO_SW:
1197 tcg_gen_ld16s_i32(dest, tcg_env, off);
1198 break;
1199 case MO_UW:
1200 tcg_gen_ld16u_i32(dest, tcg_env, off);
1201 break;
1202 case MO_UL:
1203 case MO_SL:
1204 tcg_gen_ld_i32(dest, tcg_env, off);
1205 break;
1206 default:
1207 g_assert_not_reached();
1208 }
1209 }
1210
1211 void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
1212 {
1213 long off = neon_element_offset(reg, ele, memop);
1214
1215 switch (memop) {
1216 case MO_SL:
1217 tcg_gen_ld32s_i64(dest, tcg_env, off);
1218 break;
1219 case MO_UL:
1220 tcg_gen_ld32u_i64(dest, tcg_env, off);
1221 break;
1222 case MO_UQ:
1223 tcg_gen_ld_i64(dest, tcg_env, off);
1224 break;
1225 default:
1226 g_assert_not_reached();
1227 }
1228 }
1229
1230 void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
1231 {
1232 long off = neon_element_offset(reg, ele, memop);
1233
1234 switch (memop) {
1235 case MO_8:
1236 tcg_gen_st8_i32(src, tcg_env, off);
1237 break;
1238 case MO_16:
1239 tcg_gen_st16_i32(src, tcg_env, off);
1240 break;
1241 case MO_32:
1242 tcg_gen_st_i32(src, tcg_env, off);
1243 break;
1244 default:
1245 g_assert_not_reached();
1246 }
1247 }
1248
1249 void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
1250 {
1251 long off = neon_element_offset(reg, ele, memop);
1252
1253 switch (memop) {
1254 case MO_32:
1255 tcg_gen_st32_i64(src, tcg_env, off);
1256 break;
1257 case MO_64:
1258 tcg_gen_st_i64(src, tcg_env, off);
1259 break;
1260 default:
1261 g_assert_not_reached();
1262 }
1263 }
1264
1265 #define ARM_CP_RW_BIT (1 << 20)
1266
1267 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1268 {
1269 tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1270 }
1271
1272 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1273 {
1274 tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1275 }
1276
1277 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1278 {
1279 TCGv_i32 var = tcg_temp_new_i32();
1280 tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1281 return var;
1282 }
1283
1284 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1285 {
1286 tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1287 }
1288
1289 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1290 {
1291 iwmmxt_store_reg(cpu_M0, rn);
1292 }
1293
1294 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1295 {
1296 iwmmxt_load_reg(cpu_M0, rn);
1297 }
1298
1299 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1300 {
1301 iwmmxt_load_reg(cpu_V1, rn);
1302 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1303 }
1304
1305 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1306 {
1307 iwmmxt_load_reg(cpu_V1, rn);
1308 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1309 }
1310
1311 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1312 {
1313 iwmmxt_load_reg(cpu_V1, rn);
1314 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1315 }
1316
1317 #define IWMMXT_OP(name) \
1318 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1319 { \
1320 iwmmxt_load_reg(cpu_V1, rn); \
1321 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1322 }
1323
1324 #define IWMMXT_OP_ENV(name) \
1325 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1326 { \
1327 iwmmxt_load_reg(cpu_V1, rn); \
1328 gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
1329 }
1330
1331 #define IWMMXT_OP_ENV_SIZE(name) \
1332 IWMMXT_OP_ENV(name##b) \
1333 IWMMXT_OP_ENV(name##w) \
1334 IWMMXT_OP_ENV(name##l)
1335
1336 #define IWMMXT_OP_ENV1(name) \
1337 static inline void gen_op_iwmmxt_##name##_M0(void) \
1338 { \
1339 gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
1340 }
1341
1342 IWMMXT_OP(maddsq)
1343 IWMMXT_OP(madduq)
1344 IWMMXT_OP(sadb)
1345 IWMMXT_OP(sadw)
1346 IWMMXT_OP(mulslw)
1347 IWMMXT_OP(mulshw)
1348 IWMMXT_OP(mululw)
1349 IWMMXT_OP(muluhw)
1350 IWMMXT_OP(macsw)
1351 IWMMXT_OP(macuw)
1352
1353 IWMMXT_OP_ENV_SIZE(unpackl)
1354 IWMMXT_OP_ENV_SIZE(unpackh)
1355
1356 IWMMXT_OP_ENV1(unpacklub)
1357 IWMMXT_OP_ENV1(unpackluw)
1358 IWMMXT_OP_ENV1(unpacklul)
1359 IWMMXT_OP_ENV1(unpackhub)
1360 IWMMXT_OP_ENV1(unpackhuw)
1361 IWMMXT_OP_ENV1(unpackhul)
1362 IWMMXT_OP_ENV1(unpacklsb)
1363 IWMMXT_OP_ENV1(unpacklsw)
1364 IWMMXT_OP_ENV1(unpacklsl)
1365 IWMMXT_OP_ENV1(unpackhsb)
1366 IWMMXT_OP_ENV1(unpackhsw)
1367 IWMMXT_OP_ENV1(unpackhsl)
1368
1369 IWMMXT_OP_ENV_SIZE(cmpeq)
1370 IWMMXT_OP_ENV_SIZE(cmpgtu)
1371 IWMMXT_OP_ENV_SIZE(cmpgts)
1372
1373 IWMMXT_OP_ENV_SIZE(mins)
1374 IWMMXT_OP_ENV_SIZE(minu)
1375 IWMMXT_OP_ENV_SIZE(maxs)
1376 IWMMXT_OP_ENV_SIZE(maxu)
1377
1378 IWMMXT_OP_ENV_SIZE(subn)
1379 IWMMXT_OP_ENV_SIZE(addn)
1380 IWMMXT_OP_ENV_SIZE(subu)
1381 IWMMXT_OP_ENV_SIZE(addu)
1382 IWMMXT_OP_ENV_SIZE(subs)
1383 IWMMXT_OP_ENV_SIZE(adds)
1384
1385 IWMMXT_OP_ENV(avgb0)
1386 IWMMXT_OP_ENV(avgb1)
1387 IWMMXT_OP_ENV(avgw0)
1388 IWMMXT_OP_ENV(avgw1)
1389
1390 IWMMXT_OP_ENV(packuw)
1391 IWMMXT_OP_ENV(packul)
1392 IWMMXT_OP_ENV(packuq)
1393 IWMMXT_OP_ENV(packsw)
1394 IWMMXT_OP_ENV(packsl)
1395 IWMMXT_OP_ENV(packsq)
1396
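/* Set the MUP bit in wCon, noting that the iwMMXt data registers were updated. */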
1397 static void gen_op_iwmmxt_set_mup(void)
1398 {
1399 TCGv_i32 tmp;
1400 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1401 tcg_gen_ori_i32(tmp, tmp, 2);
1402 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1403 }
1404
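/* Set the CUP bit in wCon, noting that iwMMXt control/status state was updated. */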
1405 static void gen_op_iwmmxt_set_cup(void)
1406 {
1407 TCGv_i32 tmp;
1408 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1409 tcg_gen_ori_i32(tmp, tmp, 1);
1410 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1411 }
1412
1413 static void gen_op_iwmmxt_setpsr_nz(void)
1414 {
1415 TCGv_i32 tmp = tcg_temp_new_i32();
1416 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1417 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1418 }
1419
1420 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1421 {
1422 iwmmxt_load_reg(cpu_V1, rn);
1423 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1424 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1425 }
1426
1427 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1428 TCGv_i32 dest)
1429 {
1430 int rd;
1431 uint32_t offset;
1432 TCGv_i32 tmp;
1433
1434 rd = (insn >> 16) & 0xf;
1435 tmp = load_reg(s, rd);
1436
1437 offset = (insn & 0xff) << ((insn >> 7) & 2);
1438 if (insn & (1 << 24)) {
1439 /* Pre indexed */
1440 if (insn & (1 << 23))
1441 tcg_gen_addi_i32(tmp, tmp, offset);
1442 else
1443 tcg_gen_addi_i32(tmp, tmp, -offset);
1444 tcg_gen_mov_i32(dest, tmp);
1445 if (insn & (1 << 21)) {
1446 store_reg(s, rd, tmp);
1447 }
1448 } else if (insn & (1 << 21)) {
1449 /* Post indexed */
1450 tcg_gen_mov_i32(dest, tmp);
1451 if (insn & (1 << 23))
1452 tcg_gen_addi_i32(tmp, tmp, offset);
1453 else
1454 tcg_gen_addi_i32(tmp, tmp, -offset);
1455 store_reg(s, rd, tmp);
1456 } else if (!(insn & (1 << 23)))
1457 return 1;
1458 return 0;
1459 }
1460
1461 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1462 {
1463 int rd = (insn >> 0) & 0xf;
1464 TCGv_i32 tmp;
1465
1466 if (insn & (1 << 8)) {
1467 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1468 return 1;
1469 } else {
1470 tmp = iwmmxt_load_creg(rd);
1471 }
1472 } else {
1473 tmp = tcg_temp_new_i32();
1474 iwmmxt_load_reg(cpu_V0, rd);
1475 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1476 }
1477 tcg_gen_andi_i32(tmp, tmp, mask);
1478 tcg_gen_mov_i32(dest, tmp);
1479 return 0;
1480 }
1481
1482 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1483 (ie. an undefined instruction). */
1484 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1485 {
1486 int rd, wrd;
1487 int rdhi, rdlo, rd0, rd1, i;
1488 TCGv_i32 addr;
1489 TCGv_i32 tmp, tmp2, tmp3;
1490
1491 if ((insn & 0x0e000e00) == 0x0c000000) {
1492 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1493 wrd = insn & 0xf;
1494 rdlo = (insn >> 12) & 0xf;
1495 rdhi = (insn >> 16) & 0xf;
1496 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1497 iwmmxt_load_reg(cpu_V0, wrd);
1498 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1499 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1500 } else { /* TMCRR */
1501 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1502 iwmmxt_store_reg(cpu_V0, wrd);
1503 gen_op_iwmmxt_set_mup();
1504 }
1505 return 0;
1506 }
1507
1508 wrd = (insn >> 12) & 0xf;
1509 addr = tcg_temp_new_i32();
1510 if (gen_iwmmxt_address(s, insn, addr)) {
1511 return 1;
1512 }
1513 if (insn & ARM_CP_RW_BIT) {
1514 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1515 tmp = tcg_temp_new_i32();
1516 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1517 iwmmxt_store_creg(wrd, tmp);
1518 } else {
1519 i = 1;
1520 if (insn & (1 << 8)) {
1521 if (insn & (1 << 22)) { /* WLDRD */
1522 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1523 i = 0;
1524 } else { /* WLDRW wRd */
1525 tmp = tcg_temp_new_i32();
1526 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1527 }
1528 } else {
1529 tmp = tcg_temp_new_i32();
1530 if (insn & (1 << 22)) { /* WLDRH */
1531 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1532 } else { /* WLDRB */
1533 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1534 }
1535 }
1536 if (i) {
1537 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1538 }
1539 gen_op_iwmmxt_movq_wRn_M0(wrd);
1540 }
1541 } else {
1542 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1543 tmp = iwmmxt_load_creg(wrd);
1544 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1545 } else {
1546 gen_op_iwmmxt_movq_M0_wRn(wrd);
1547 tmp = tcg_temp_new_i32();
1548 if (insn & (1 << 8)) {
1549 if (insn & (1 << 22)) { /* WSTRD */
1550 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1553 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1554 }
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1558 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1559 } else { /* WSTRB */
1560 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1561 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1562 }
1563 }
1564 }
1565 }
1566 return 0;
1567 }
1568
1569 if ((insn & 0x0f000000) != 0x0e000000)
1570 return 1;
1571
1572 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 0) & 0xf;
1576 rd1 = (insn >> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1583 break;
1584 case 0x011: /* TMCR */
1585 if (insn & 0xf)
1586 return 1;
1587 rd = (insn >> 12) & 0xf;
1588 wrd = (insn >> 16) & 0xf;
1589 switch (wrd) {
1590 case ARM_IWMMXT_wCID:
1591 case ARM_IWMMXT_wCASF:
1592 break;
1593 case ARM_IWMMXT_wCon:
1594 gen_op_iwmmxt_set_cup();
1595 /* Fall through. */
1596 case ARM_IWMMXT_wCSSF:
1597 tmp = iwmmxt_load_creg(wrd);
1598 tmp2 = load_reg(s, rd);
1599 tcg_gen_andc_i32(tmp, tmp, tmp2);
1600 iwmmxt_store_creg(wrd, tmp);
1601 break;
1602 case ARM_IWMMXT_wCGR0:
1603 case ARM_IWMMXT_wCGR1:
1604 case ARM_IWMMXT_wCGR2:
1605 case ARM_IWMMXT_wCGR3:
1606 gen_op_iwmmxt_set_cup();
1607 tmp = load_reg(s, rd);
1608 iwmmxt_store_creg(wrd, tmp);
1609 break;
1610 default:
1611 return 1;
1612 }
1613 break;
1614 case 0x100: /* WXOR */
1615 wrd = (insn >> 12) & 0xf;
1616 rd0 = (insn >> 0) & 0xf;
1617 rd1 = (insn >> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1624 break;
1625 case 0x111: /* TMRC */
1626 if (insn & 0xf)
1627 return 1;
1628 rd = (insn >> 12) & 0xf;
1629 wrd = (insn >> 16) & 0xf;
1630 tmp = iwmmxt_load_creg(wrd);
1631 store_reg(s, rd, tmp);
1632 break;
1633 case 0x300: /* WANDN */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1644 break;
1645 case 0x200: /* WAND */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 0) & 0xf;
1659 rd1 = (insn >> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 if (insn & (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1663 else
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 break;
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 switch ((insn >> 22) & 3) {
1696 case 0:
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1698 break;
1699 case 1:
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1701 break;
1702 case 2:
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1704 break;
1705 case 3:
1706 return 1;
1707 }
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1711 break;
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1719 else
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1721 if (!(insn & (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
1731 if (insn & (1 << 21)) {
1732 if (insn & (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1736 } else {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1741 }
1742 gen_op_iwmmxt_movq_wRn_M0(wrd);
1743 gen_op_iwmmxt_set_mup();
1744 break;
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1752 else
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1754 if (!(insn & (1 << 20))) {
1755 iwmmxt_load_reg(cpu_V1, wrd);
1756 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1757 }
1758 gen_op_iwmmxt_movq_wRn_M0(wrd);
1759 gen_op_iwmmxt_set_mup();
1760 break;
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
1766 switch ((insn >> 22) & 3) {
1767 case 0:
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1769 break;
1770 case 1:
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1772 break;
1773 case 2:
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1775 break;
1776 case 3:
1777 return 1;
1778 }
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1782 break;
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd = (insn >> 12) & 0xf;
1785 rd0 = (insn >> 16) & 0xf;
1786 rd1 = (insn >> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0);
1788 if (insn & (1 << 22)) {
1789 if (insn & (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1791 else
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1793 } else {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1798 }
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1802 break;
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 rd1 = (insn >> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
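        /* WALIGNR: the byte alignment amount (0..7) comes from the
         * selected wCGR control register.
         */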
1808 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1809 tcg_gen_andi_i32(tmp, tmp, 7);
1810 iwmmxt_load_reg(cpu_V1, rd1);
1811 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1814 break;
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 if (((insn >> 6) & 3) == 3)
1817 return 1;
1818 rd = (insn >> 12) & 0xf;
1819 wrd = (insn >> 16) & 0xf;
1820 tmp = load_reg(s, rd);
1821 gen_op_iwmmxt_movq_M0_wRn(wrd);
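        /* TINSR: insert the core register value into one lane of wRd.
         * insn[7:6] selects the element size; tmp2 is the lane mask and
         * tmp3 the bit offset of the selected lane.
         */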
1822 switch ((insn >> 6) & 3) {
1823 case 0:
1824 tmp2 = tcg_constant_i32(0xff);
1825 tmp3 = tcg_constant_i32((insn & 7) << 3);
1826 break;
1827 case 1:
1828 tmp2 = tcg_constant_i32(0xffff);
1829 tmp3 = tcg_constant_i32((insn & 3) << 4);
1830 break;
1831 case 2:
1832 tmp2 = tcg_constant_i32(0xffffffff);
1833 tmp3 = tcg_constant_i32((insn & 1) << 5);
1834 break;
1835 default:
1836 g_assert_not_reached();
1837 }
1838 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1839 gen_op_iwmmxt_movq_wRn_M0(wrd);
1840 gen_op_iwmmxt_set_mup();
1841 break;
1842 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1843 rd = (insn >> 12) & 0xf;
1844 wrd = (insn >> 16) & 0xf;
1845 if (rd == 15 || ((insn >> 22) & 3) == 3)
1846 return 1;
1847 gen_op_iwmmxt_movq_M0_wRn(wrd);
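        /* TEXTRM: extract the selected lane into a core register;
         * insn bit 3 chooses sign extension rather than zero extension
         * for byte and halfword elements.
         */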
1848 tmp = tcg_temp_new_i32();
1849 switch ((insn >> 22) & 3) {
1850 case 0:
1851 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1852 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1853 if (insn & 8) {
1854 tcg_gen_ext8s_i32(tmp, tmp);
1855 } else {
1856 tcg_gen_andi_i32(tmp, tmp, 0xff);
1857 }
1858 break;
1859 case 1:
1860 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1861 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1862 if (insn & 8) {
1863 tcg_gen_ext16s_i32(tmp, tmp);
1864 } else {
1865 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1866 }
1867 break;
1868 case 2:
1869 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1870 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1871 break;
1872 }
1873 store_reg(s, rd, tmp);
1874 break;
1875 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1876 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1877 return 1;
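        /* TEXTRC: move the selected lane's flag field from wCASF into
         * bits [31:28] so that gen_set_nzcv() can install it as NZCV.
         */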
1878 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1879 switch ((insn >> 22) & 3) {
1880 case 0:
1881 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1882 break;
1883 case 1:
1884 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1885 break;
1886 case 2:
1887 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1888 break;
1889 }
1890 tcg_gen_shli_i32(tmp, tmp, 28);
1891 gen_set_nzcv(tmp);
1892 break;
1893 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1894 if (((insn >> 6) & 3) == 3)
1895 return 1;
1896 rd = (insn >> 12) & 0xf;
1897 wrd = (insn >> 16) & 0xf;
1898 tmp = load_reg(s, rd);
1899 switch ((insn >> 6) & 3) {
1900 case 0:
1901 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1902 break;
1903 case 1:
1904 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1905 break;
1906 case 2:
1907 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1908 break;
1909 }
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 break;
1913 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1914 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1915 return 1;
1916 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1917 tmp2 = tcg_temp_new_i32();
1918 tcg_gen_mov_i32(tmp2, tmp);
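        /* TANDC: AND together the per-lane flag fields of wCASF by
         * repeatedly shifting a copy up and ANDing; the combined flags
         * end up in the top bits for gen_set_nzcv().
         */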
1919 switch ((insn >> 22) & 3) {
1920 case 0:
1921 for (i = 0; i < 7; i ++) {
1922 tcg_gen_shli_i32(tmp2, tmp2, 4);
1923 tcg_gen_and_i32(tmp, tmp, tmp2);
1924 }
1925 break;
1926 case 1:
1927 for (i = 0; i < 3; i ++) {
1928 tcg_gen_shli_i32(tmp2, tmp2, 8);
1929 tcg_gen_and_i32(tmp, tmp, tmp2);
1930 }
1931 break;
1932 case 2:
1933 tcg_gen_shli_i32(tmp2, tmp2, 16);
1934 tcg_gen_and_i32(tmp, tmp, tmp2);
1935 break;
1936 }
1937 gen_set_nzcv(tmp);
1938 break;
1939 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1946 break;
1947 case 1:
1948 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1949 break;
1950 case 2:
1951 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1952 break;
1953 case 3:
1954 return 1;
1955 }
1956 gen_op_iwmmxt_movq_wRn_M0(wrd);
1957 gen_op_iwmmxt_set_mup();
1958 break;
1959 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1960 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1961 return 1;
1962 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1963 tmp2 = tcg_temp_new_i32();
1964 tcg_gen_mov_i32(tmp2, tmp);
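        /* TORC: as for TANDC above, but ORing the per-lane flag
         * fields of wCASF together.
         */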
1965 switch ((insn >> 22) & 3) {
1966 case 0:
1967 for (i = 0; i < 7; i ++) {
1968 tcg_gen_shli_i32(tmp2, tmp2, 4);
1969 tcg_gen_or_i32(tmp, tmp, tmp2);
1970 }
1971 break;
1972 case 1:
1973 for (i = 0; i < 3; i ++) {
1974 tcg_gen_shli_i32(tmp2, tmp2, 8);
1975 tcg_gen_or_i32(tmp, tmp, tmp2);
1976 }
1977 break;
1978 case 2:
1979 tcg_gen_shli_i32(tmp2, tmp2, 16);
1980 tcg_gen_or_i32(tmp, tmp, tmp2);
1981 break;
1982 }
1983 gen_set_nzcv(tmp);
1984 break;
1985 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1986 rd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1989 return 1;
1990 gen_op_iwmmxt_movq_M0_wRn(rd0);
1991 tmp = tcg_temp_new_i32();
1992 switch ((insn >> 22) & 3) {
1993 case 0:
1994 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1995 break;
1996 case 1:
1997 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1998 break;
1999 case 2:
2000 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2001 break;
2002 }
2003 store_reg(s, rd, tmp);
2004 break;
2005 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2006 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2007 wrd = (insn >> 12) & 0xf;
2008 rd0 = (insn >> 16) & 0xf;
2009 rd1 = (insn >> 0) & 0xf;
2010 gen_op_iwmmxt_movq_M0_wRn(rd0);
2011 switch ((insn >> 22) & 3) {
2012 case 0:
2013 if (insn & (1 << 21))
2014 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2015 else
2016 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2017 break;
2018 case 1:
2019 if (insn & (1 << 21))
2020 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2021 else
2022 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2023 break;
2024 case 2:
2025 if (insn & (1 << 21))
2026 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2027 else
2028 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2029 break;
2030 case 3:
2031 return 1;
2032 }
2033 gen_op_iwmmxt_movq_wRn_M0(wrd);
2034 gen_op_iwmmxt_set_mup();
2035 gen_op_iwmmxt_set_cup();
2036 break;
2037 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2038 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2039 wrd = (insn >> 12) & 0xf;
2040 rd0 = (insn >> 16) & 0xf;
2041 gen_op_iwmmxt_movq_M0_wRn(rd0);
2042 switch ((insn >> 22) & 3) {
2043 case 0:
2044 if (insn & (1 << 21))
2045 gen_op_iwmmxt_unpacklsb_M0();
2046 else
2047 gen_op_iwmmxt_unpacklub_M0();
2048 break;
2049 case 1:
2050 if (insn & (1 << 21))
2051 gen_op_iwmmxt_unpacklsw_M0();
2052 else
2053 gen_op_iwmmxt_unpackluw_M0();
2054 break;
2055 case 2:
2056 if (insn & (1 << 21))
2057 gen_op_iwmmxt_unpacklsl_M0();
2058 else
2059 gen_op_iwmmxt_unpacklul_M0();
2060 break;
2061 case 3:
2062 return 1;
2063 }
2064 gen_op_iwmmxt_movq_wRn_M0(wrd);
2065 gen_op_iwmmxt_set_mup();
2066 gen_op_iwmmxt_set_cup();
2067 break;
2068 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2069 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2070 wrd = (insn >> 12) & 0xf;
2071 rd0 = (insn >> 16) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0);
2073 switch ((insn >> 22) & 3) {
2074 case 0:
2075 if (insn & (1 << 21))
2076 gen_op_iwmmxt_unpackhsb_M0();
2077 else
2078 gen_op_iwmmxt_unpackhub_M0();
2079 break;
2080 case 1:
2081 if (insn & (1 << 21))
2082 gen_op_iwmmxt_unpackhsw_M0();
2083 else
2084 gen_op_iwmmxt_unpackhuw_M0();
2085 break;
2086 case 2:
2087 if (insn & (1 << 21))
2088 gen_op_iwmmxt_unpackhsl_M0();
2089 else
2090 gen_op_iwmmxt_unpackhul_M0();
2091 break;
2092 case 3:
2093 return 1;
2094 }
2095 gen_op_iwmmxt_movq_wRn_M0(wrd);
2096 gen_op_iwmmxt_set_mup();
2097 gen_op_iwmmxt_set_cup();
2098 break;
2099 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2100 case 0x214: case 0x614: case 0xa14: case 0xe14:
2101 if (((insn >> 22) & 3) == 0)
2102 return 1;
2103 wrd = (insn >> 12) & 0xf;
2104 rd0 = (insn >> 16) & 0xf;
2105 gen_op_iwmmxt_movq_M0_wRn(rd0);
2106 tmp = tcg_temp_new_i32();
2107 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2108 return 1;
2109 }
2110 switch ((insn >> 22) & 3) {
2111 case 1:
2112 gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
2113 break;
2114 case 2:
2115 gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
2116 break;
2117 case 3:
2118 gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
2119 break;
2120 }
2121 gen_op_iwmmxt_movq_wRn_M0(wrd);
2122 gen_op_iwmmxt_set_mup();
2123 gen_op_iwmmxt_set_cup();
2124 break;
2125 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2126 case 0x014: case 0x414: case 0x814: case 0xc14:
2127 if (((insn >> 22) & 3) == 0)
2128 return 1;
2129 wrd = (insn >> 12) & 0xf;
2130 rd0 = (insn >> 16) & 0xf;
2131 gen_op_iwmmxt_movq_M0_wRn(rd0);
2132 tmp = tcg_temp_new_i32();
2133 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2134 return 1;
2135 }
2136 switch ((insn >> 22) & 3) {
2137 case 1:
2138 gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
2139 break;
2140 case 2:
2141 gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
2142 break;
2143 case 3:
2144 gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
2145 break;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 gen_op_iwmmxt_set_cup();
2150 break;
2151 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2152 case 0x114: case 0x514: case 0x914: case 0xd14:
2153 if (((insn >> 22) & 3) == 0)
2154 return 1;
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 gen_op_iwmmxt_movq_M0_wRn(rd0);
2158 tmp = tcg_temp_new_i32();
2159 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2160 return 1;
2161 }
2162 switch ((insn >> 22) & 3) {
2163 case 1:
2164 gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
2165 break;
2166 case 2:
2167 gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
2168 break;
2169 case 3:
2170 gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
2171 break;
2172 }
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2176 break;
2177 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2178 case 0x314: case 0x714: case 0xb14: case 0xf14:
2179 if (((insn >> 22) & 3) == 0)
2180 return 1;
2181 wrd = (insn >> 12) & 0xf;
2182 rd0 = (insn >> 16) & 0xf;
2183 gen_op_iwmmxt_movq_M0_wRn(rd0);
2184 tmp = tcg_temp_new_i32();
2185 switch ((insn >> 22) & 3) {
2186 case 1:
2187 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2188 return 1;
2189 }
2190 gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
2191 break;
2192 case 2:
2193 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2194 return 1;
2195 }
2196 gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
2197 break;
2198 case 3:
2199 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2200 return 1;
2201 }
2202 gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
2203 break;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2210 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 if (insn & (1 << 21))
2218 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2219 else
2220 gen_op_iwmmxt_minub_M0_wRn(rd1);
2221 break;
2222 case 1:
2223 if (insn & (1 << 21))
2224 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2225 else
2226 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2227 break;
2228 case 2:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minul_M0_wRn(rd1);
2233 break;
2234 case 3:
2235 return 1;
2236 }
2237 gen_op_iwmmxt_movq_wRn_M0(wrd);
2238 gen_op_iwmmxt_set_mup();
2239 break;
2240 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2241 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 rd1 = (insn >> 0) & 0xf;
2245 gen_op_iwmmxt_movq_M0_wRn(rd0);
2246 switch ((insn >> 22) & 3) {
2247 case 0:
2248 if (insn & (1 << 21))
2249 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2250 else
2251 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2252 break;
2253 case 1:
2254 if (insn & (1 << 21))
2255 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2256 else
2257 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2258 break;
2259 case 2:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2264 break;
2265 case 3:
2266 return 1;
2267 }
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2272 case 0x402: case 0x502: case 0x602: case 0x702:
2273 wrd = (insn >> 12) & 0xf;
2274 rd0 = (insn >> 16) & 0xf;
2275 rd1 = (insn >> 0) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
2277 iwmmxt_load_reg(cpu_V1, rd1);
2278 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1,
2279 tcg_constant_i32((insn >> 20) & 3));
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2284 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2285 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2286 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2287 wrd = (insn >> 12) & 0xf;
2288 rd0 = (insn >> 16) & 0xf;
2289 rd1 = (insn >> 0) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
2291 switch ((insn >> 20) & 0xf) {
2292 case 0x0:
2293 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2294 break;
2295 case 0x1:
2296 gen_op_iwmmxt_subub_M0_wRn(rd1);
2297 break;
2298 case 0x3:
2299 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2300 break;
2301 case 0x4:
2302 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2303 break;
2304 case 0x5:
2305 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2306 break;
2307 case 0x7:
2308 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2309 break;
2310 case 0x8:
2311 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2312 break;
2313 case 0x9:
2314 gen_op_iwmmxt_subul_M0_wRn(rd1);
2315 break;
2316 case 0xb:
2317 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2318 break;
2319 default:
2320 return 1;
2321 }
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2324 gen_op_iwmmxt_set_cup();
2325 break;
2326 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2327 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2328 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2329 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2330 wrd = (insn >> 12) & 0xf;
2331 rd0 = (insn >> 16) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
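        /* WSHUFH: the 8-bit shuffle control is split across the
         * encoding: bits [7:4] come from insn[23:20] and bits [3:0]
         * from insn[3:0].
         */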
2333 tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2334 gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
2335 gen_op_iwmmxt_movq_wRn_M0(wrd);
2336 gen_op_iwmmxt_set_mup();
2337 gen_op_iwmmxt_set_cup();
2338 break;
2339 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2340 case 0x418: case 0x518: case 0x618: case 0x718:
2341 case 0x818: case 0x918: case 0xa18: case 0xb18:
2342 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2343 wrd = (insn >> 12) & 0xf;
2344 rd0 = (insn >> 16) & 0xf;
2345 rd1 = (insn >> 0) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 switch ((insn >> 20) & 0xf) {
2348 case 0x0:
2349 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2350 break;
2351 case 0x1:
2352 gen_op_iwmmxt_addub_M0_wRn(rd1);
2353 break;
2354 case 0x3:
2355 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2356 break;
2357 case 0x4:
2358 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2359 break;
2360 case 0x5:
2361 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2362 break;
2363 case 0x7:
2364 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2365 break;
2366 case 0x8:
2367 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2368 break;
2369 case 0x9:
2370 gen_op_iwmmxt_addul_M0_wRn(rd1);
2371 break;
2372 case 0xb:
2373 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2374 break;
2375 default:
2376 return 1;
2377 }
2378 gen_op_iwmmxt_movq_wRn_M0(wrd);
2379 gen_op_iwmmxt_set_mup();
2380 gen_op_iwmmxt_set_cup();
2381 break;
2382 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2383 case 0x408: case 0x508: case 0x608: case 0x708:
2384 case 0x808: case 0x908: case 0xa08: case 0xb08:
2385 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2386 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2387 return 1;
2388 wrd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
2390 rd1 = (insn >> 0) & 0xf;
2391 gen_op_iwmmxt_movq_M0_wRn(rd0);
2392 switch ((insn >> 22) & 3) {
2393 case 1:
2394 if (insn & (1 << 21))
2395 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2396 else
2397 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2398 break;
2399 case 2:
2400 if (insn & (1 << 21))
2401 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2402 else
2403 gen_op_iwmmxt_packul_M0_wRn(rd1);
2404 break;
2405 case 3:
2406 if (insn & (1 << 21))
2407 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2408 else
2409 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2410 break;
2411 }
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 gen_op_iwmmxt_set_cup();
2415 break;
2416 case 0x201: case 0x203: case 0x205: case 0x207:
2417 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2418 case 0x211: case 0x213: case 0x215: case 0x217:
2419 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2420 wrd = (insn >> 5) & 0xf;
2421 rd0 = (insn >> 12) & 0xf;
2422 rd1 = (insn >> 0) & 0xf;
2423 if (rd0 == 0xf || rd1 == 0xf)
2424 return 1;
2425 gen_op_iwmmxt_movq_M0_wRn(wrd);
2426 tmp = load_reg(s, rd0);
2427 tmp2 = load_reg(s, rd1);
2428 switch ((insn >> 16) & 0xf) {
2429 case 0x0: /* TMIA */
2430 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2431 break;
2432 case 0x8: /* TMIAPH */
2433 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2434 break;
2435 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2436 if (insn & (1 << 16))
2437 tcg_gen_shri_i32(tmp, tmp, 16);
2438 if (insn & (1 << 17))
2439 tcg_gen_shri_i32(tmp2, tmp2, 16);
2440 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2441 break;
2442 default:
2443 return 1;
2444 }
2445 gen_op_iwmmxt_movq_wRn_M0(wrd);
2446 gen_op_iwmmxt_set_mup();
2447 break;
2448 default:
2449 return 1;
2450 }
2451
2452 return 0;
2453 }
2454
2455 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2456    (i.e. an undefined instruction). */
2457 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2458 {
2459 int acc, rd0, rd1, rdhi, rdlo;
2460 TCGv_i32 tmp, tmp2;
2461
2462 if ((insn & 0x0ff00f10) == 0x0e200010) {
2463 /* Multiply with Internal Accumulate Format */
2464 rd0 = (insn >> 12) & 0xf;
2465 rd1 = insn & 0xf;
2466 acc = (insn >> 5) & 7;
2467
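        /* Only acc0 is handled here; any other accumulator number is
         * rejected as UNDEF.
         */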
2468 if (acc != 0)
2469 return 1;
2470
2471 tmp = load_reg(s, rd0);
2472 tmp2 = load_reg(s, rd1);
2473 switch ((insn >> 16) & 0xf) {
2474 case 0x0: /* MIA */
2475 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2476 break;
2477 case 0x8: /* MIAPH */
2478 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2479 break;
2480 case 0xc: /* MIABB */
2481 case 0xd: /* MIABT */
2482 case 0xe: /* MIATB */
2483 case 0xf: /* MIATT */
2484 if (insn & (1 << 16))
2485 tcg_gen_shri_i32(tmp, tmp, 16);
2486 if (insn & (1 << 17))
2487 tcg_gen_shri_i32(tmp2, tmp2, 16);
2488 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2489 break;
2490 default:
2491 return 1;
2492 }
2493
2494 gen_op_iwmmxt_movq_wRn_M0(acc);
2495 return 0;
2496 }
2497
2498 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2499 /* Internal Accumulator Access Format */
2500 rdhi = (insn >> 16) & 0xf;
2501 rdlo = (insn >> 12) & 0xf;
2502 acc = insn & 7;
2503
2504 if (acc != 0)
2505 return 1;
2506
2507 if (insn & ARM_CP_RW_BIT) { /* MRA */
2508 iwmmxt_load_reg(cpu_V0, acc);
2509 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2510 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
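            /* The accumulator is 40 bits wide, so only bits [39:32]
             * are significant in rdhi.
             */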
2511 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2512 } else { /* MAR */
2513 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2514 iwmmxt_store_reg(cpu_V0, acc);
2515 }
2516 return 0;
2517 }
2518
2519 return 1;
2520 }
2521
2522 static void gen_goto_ptr(void)
2523 {
2524 tcg_gen_lookup_and_goto_ptr();
2525 }
2526
2527 /* This will end the TB but doesn't guarantee we'll return to
2528 * cpu_loop_exec. Any live exit_requests will be processed as we
2529 * enter the next TB.
2530 */
2531 static void gen_goto_tb(DisasContext *s, int n, target_long diff)
2532 {
2533 if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
2534 /*
2535 * For pcrel, the pc must always be up-to-date on entry to
2536 * the linked TB, so that it can use simple additions for all
2537 * further adjustments. For !pcrel, the linked TB is compiled
2538 * to know its full virtual address, so we can delay the
2539 * update to pc to the unlinked path. A long chain of links
2540 * can thus avoid many updates to the PC.
2541 */
2542 if (tb_cflags(s->base.tb) & CF_PCREL) {
2543 gen_update_pc(s, diff);
2544 tcg_gen_goto_tb(n);
2545 } else {
2546 tcg_gen_goto_tb(n);
2547 gen_update_pc(s, diff);
2548 }
2549 tcg_gen_exit_tb(s->base.tb, n);
2550 } else {
2551 gen_update_pc(s, diff);
2552 gen_goto_ptr();
2553 }
2554 s->base.is_jmp = DISAS_NORETURN;
2555 }
2556
2557 /* Jump, specifying which TB number to use if we gen_goto_tb() */
2558 static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
2559 {
2560 if (unlikely(s->ss_active)) {
2561 /* An indirect jump so that we still trigger the debug exception. */
2562 gen_update_pc(s, diff);
2563 s->base.is_jmp = DISAS_JUMP;
2564 return;
2565 }
2566 switch (s->base.is_jmp) {
2567 case DISAS_NEXT:
2568 case DISAS_TOO_MANY:
2569 case DISAS_NORETURN:
2570 /*
2571 * The normal case: just go to the destination TB.
2572 * NB: NORETURN happens if we generate code like
2573 * gen_brcondi(l);
2574 * gen_jmp();
2575 * gen_set_label(l);
2576 * gen_jmp();
2577 * on the second call to gen_jmp().
2578 */
2579 gen_goto_tb(s, tbno, diff);
2580 break;
2581 case DISAS_UPDATE_NOCHAIN:
2582 case DISAS_UPDATE_EXIT:
2583 /*
2584 * We already decided we're leaving the TB for some other reason.
2585 * Avoid using goto_tb so we really do exit back to the main loop
2586 * and don't chain to another TB.
2587 */
2588 gen_update_pc(s, diff);
2589 gen_goto_ptr();
2590 s->base.is_jmp = DISAS_NORETURN;
2591 break;
2592 default:
2593 /*
2594 * We shouldn't be emitting code for a jump and also have
2595 * is_jmp set to one of the special cases like DISAS_SWI.
2596 */
2597 g_assert_not_reached();
2598 }
2599 }
2600
2601 static inline void gen_jmp(DisasContext *s, target_long diff)
2602 {
2603 gen_jmp_tb(s, diff, 0);
2604 }
2605
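/* Signed 16x16->32 multiply, taking the top or bottom half of each
 * operand as selected by x and y (as used for the halfword multiply
 * instructions such as SMULxy).
 */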
2606 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2607 {
2608 if (x)
2609 tcg_gen_sari_i32(t0, t0, 16);
2610 else
2611 gen_sxth(t0);
2612 if (y)
2613 tcg_gen_sari_i32(t1, t1, 16);
2614 else
2615 gen_sxth(t1);
2616 tcg_gen_mul_i32(t0, t0, t1);
2617 }
2618
2619 /* Return the mask of PSR bits set by a MSR instruction. */
2620 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2621 {
2622 uint32_t mask = 0;
2623
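    /* flags is the 4-bit field mask from the instruction:
     * bit 0 = control (PSR[7:0]), bit 1 = extension (PSR[15:8]),
     * bit 2 = status (PSR[23:16]), bit 3 = flags (PSR[31:24]).
     */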
2624 if (flags & (1 << 0)) {
2625 mask |= 0xff;
2626 }
2627 if (flags & (1 << 1)) {
2628 mask |= 0xff00;
2629 }
2630 if (flags & (1 << 2)) {
2631 mask |= 0xff0000;
2632 }
2633 if (flags & (1 << 3)) {
2634 mask |= 0xff000000;
2635 }
2636
2637 /* Mask out undefined and reserved bits. */
2638 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2639
2640 /* Mask out execution state. */
2641 if (!spsr) {
2642 mask &= ~CPSR_EXEC;
2643 }
2644
2645 /* Mask out privileged bits. */
2646 if (IS_USER(s)) {
2647 mask &= CPSR_USER;
2648 }
2649 return mask;
2650 }
2651
2652 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2653 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2654 {
2655 TCGv_i32 tmp;
2656 if (spsr) {
2657 /* ??? This is also undefined in system mode. */
2658 if (IS_USER(s))
2659 return 1;
2660
2661 tmp = load_cpu_field(spsr);
2662 tcg_gen_andi_i32(tmp, tmp, ~mask);
2663 tcg_gen_andi_i32(t0, t0, mask);
2664 tcg_gen_or_i32(tmp, tmp, t0);
2665 store_cpu_field(tmp, spsr);
2666 } else {
2667 gen_set_cpsr(t0, mask);
2668 }
2669 gen_lookup_tb(s);
2670 return 0;
2671 }
2672
2673 /* Returns nonzero if access to the PSR is not permitted. */
2674 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2675 {
2676 TCGv_i32 tmp;
2677 tmp = tcg_temp_new_i32();
2678 tcg_gen_movi_i32(tmp, val);
2679 return gen_set_psr(s, mask, spsr, tmp);
2680 }
2681
2682 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2683 int *tgtmode, int *regno)
2684 {
2685 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2686 * the target mode and register number, and identify the various
2687 * unpredictable cases.
2688 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2689 * + executed in user mode
2690 * + using R15 as the src/dest register
2691 * + accessing an unimplemented register
2692 * + accessing a register that's inaccessible at current PL/security state*
2693 * + accessing a register that you could access with a different insn
2694 * We choose to UNDEF in all these cases.
2695 * Since we don't know which of the various AArch32 modes we are in
2696 * we have to defer some checks to runtime.
2697 * Accesses to Monitor mode registers from Secure EL1 (which implies
2698 * that EL3 is AArch64) must trap to EL3.
2699 *
2700 * If the access checks fail this function will emit code to take
2701 * an exception and return false. Otherwise it will return true,
2702 * and set *tgtmode and *regno appropriately.
2703 */
2704 /* These instructions are present only in ARMv8, or in ARMv7 with the
2705 * Virtualization Extensions.
2706 */
2707 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2708 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2709 goto undef;
2710 }
2711
2712 if (IS_USER(s) || rn == 15) {
2713 goto undef;
2714 }
2715
2716 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2717 * of registers into (r, sysm).
2718 */
2719 if (r) {
2720 /* SPSRs for other modes */
2721 switch (sysm) {
2722 case 0xe: /* SPSR_fiq */
2723 *tgtmode = ARM_CPU_MODE_FIQ;
2724 break;
2725 case 0x10: /* SPSR_irq */
2726 *tgtmode = ARM_CPU_MODE_IRQ;
2727 break;
2728 case 0x12: /* SPSR_svc */
2729 *tgtmode = ARM_CPU_MODE_SVC;
2730 break;
2731 case 0x14: /* SPSR_abt */
2732 *tgtmode = ARM_CPU_MODE_ABT;
2733 break;
2734 case 0x16: /* SPSR_und */
2735 *tgtmode = ARM_CPU_MODE_UND;
2736 break;
2737 case 0x1c: /* SPSR_mon */
2738 *tgtmode = ARM_CPU_MODE_MON;
2739 break;
2740 case 0x1e: /* SPSR_hyp */
2741 *tgtmode = ARM_CPU_MODE_HYP;
2742 break;
2743 default: /* unallocated */
2744 goto undef;
2745 }
2746 /* We arbitrarily assign SPSR a register number of 16. */
2747 *regno = 16;
2748 } else {
2749 /* general purpose registers for other modes */
2750 switch (sysm) {
2751 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2752 *tgtmode = ARM_CPU_MODE_USR;
2753 *regno = sysm + 8;
2754 break;
2755 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2756 *tgtmode = ARM_CPU_MODE_FIQ;
2757 *regno = sysm;
2758 break;
2759 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2760 *tgtmode = ARM_CPU_MODE_IRQ;
2761 *regno = sysm & 1 ? 13 : 14;
2762 break;
2763 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2764 *tgtmode = ARM_CPU_MODE_SVC;
2765 *regno = sysm & 1 ? 13 : 14;
2766 break;
2767 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2768 *tgtmode = ARM_CPU_MODE_ABT;
2769 *regno = sysm & 1 ? 13 : 14;
2770 break;
2771 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2772 *tgtmode = ARM_CPU_MODE_UND;
2773 *regno = sysm & 1 ? 13 : 14;
2774 break;
2775 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2776 *tgtmode = ARM_CPU_MODE_MON;
2777 *regno = sysm & 1 ? 13 : 14;
2778 break;
2779 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2780 *tgtmode = ARM_CPU_MODE_HYP;
2781 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2782 *regno = sysm & 1 ? 13 : 17;
2783 break;
2784 default: /* unallocated */
2785 goto undef;
2786 }
2787 }
2788
2789 /* Catch the 'accessing inaccessible register' cases we can detect
2790 * at translate time.
2791 */
2792 switch (*tgtmode) {
2793 case ARM_CPU_MODE_MON:
2794 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2795 goto undef;
2796 }
2797 if (s->current_el == 1) {
2798 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2799 * then accesses to Mon registers trap to Secure EL2, if it exists,
2800 * otherwise EL3.
2801 */
2802 TCGv_i32 tcg_el;
2803
2804 if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
2805 dc_isar_feature(aa64_sel2, s)) {
2806 /* Target EL is EL<3 minus SCR_EL3.EEL2> */
2807 tcg_el = load_cpu_field_low32(cp15.scr_el3);
2808 tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
2809 tcg_gen_addi_i32(tcg_el, tcg_el, 3);
2810 } else {
2811 tcg_el = tcg_constant_i32(3);
2812 }
2813
2814 gen_exception_insn_el_v(s, 0, EXCP_UDEF,
2815 syn_uncategorized(), tcg_el);
2816 return false;
2817 }
2818 break;
2819 case ARM_CPU_MODE_HYP:
2820 /*
2821 * r13_hyp can only be accessed from Monitor mode, and so we
2822 * can forbid accesses from EL2 or below.
2823 * elr_hyp can be accessed also from Hyp mode, so forbid
2824 * accesses from EL0 or EL1.
2825 * SPSR_hyp is supposed to be in the same category as r13_hyp
2826 * and UNPREDICTABLE if accessed from anything except Monitor
2827 * mode. However there is some real-world code that will do
2828 * it because at least some hardware happens to permit the
2829 * access. (Notably a standard Cortex-R52 startup code fragment
2830 * does this.) So we permit SPSR_hyp from Hyp mode also, to allow
2831 * this (incorrect) guest code to run.
2832 */
2833 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2
2834 || (s->current_el < 3 && *regno != 16 && *regno != 17)) {
2835 goto undef;
2836 }
2837 break;
2838 default:
2839 break;
2840 }
2841
2842 return true;
2843
2844 undef:
2845 /* If we get here then some access check did not pass */
2846 gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
2847 return false;
2848 }
2849
2850 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2851 {
2852 TCGv_i32 tcg_reg;
2853 int tgtmode = 0, regno = 0;
2854
2855     if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2856 return;
2857 }
2858
2859 /* Sync state because msr_banked() can raise exceptions */
2860 gen_set_condexec(s);
2861 gen_update_pc(s, 0);
2862 tcg_reg = load_reg(s, rn);
2863 gen_helper_msr_banked(tcg_env, tcg_reg,
2864 tcg_constant_i32(tgtmode),
2865 tcg_constant_i32(regno));
2866 s->base.is_jmp = DISAS_UPDATE_EXIT;
2867 }
2868
2869 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2870 {
2871 TCGv_i32 tcg_reg;
2872 int tgtmode = 0, regno = 0;
2873
2874     if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2875 return;
2876 }
2877
2878 /* Sync state because mrs_banked() can raise exceptions */
2879 gen_set_condexec(s);
2880 gen_update_pc(s, 0);
2881 tcg_reg = tcg_temp_new_i32();
2882 gen_helper_mrs_banked(tcg_reg, tcg_env,
2883 tcg_constant_i32(tgtmode),
2884 tcg_constant_i32(regno));
2885 store_reg(s, rn, tcg_reg);
2886 s->base.is_jmp = DISAS_UPDATE_EXIT;
2887 }
2888
2889 /* Store value to PC as for an exception return (i.e. don't
2890 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2891 * will do the masking based on the new value of the Thumb bit.
2892 */
2893 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2894 {
2895 tcg_gen_mov_i32(cpu_R[15], pc);
2896 }
2897
2898 /* Generate a v6 exception return. Marks both values as dead. */
2899 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2900 {
2901 store_pc_exc_ret(s, pc);
2902 /* The cpsr_write_eret helper will mask the low bits of PC
2903 * appropriately depending on the new Thumb bit, so it must
2904 * be called after storing the new PC.
2905 */
2906 translator_io_start(&s->base);
2907 gen_helper_cpsr_write_eret(tcg_env, cpsr);
2908 /* Must exit loop to check un-masked IRQs */
2909 s->base.is_jmp = DISAS_EXIT;
2910 }
2911
2912 /* Generate an old-style exception return. Marks pc as dead. */
2913 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2914 {
2915 gen_rfe(s, pc, load_cpu_field(spsr));
2916 }
2917
2918 static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
2919 {
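    /* Return true if this cp15 (crn, crm) pair falls in the space the
     * architecture reserves for IMPLEMENTATION DEFINED registers; this
     * is what the TIDCP trap checks in do_coproc_insn() care about.
     */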
2920 static const uint16_t mask[3] = {
2921 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */
2922 0b0000000100010011, /* crn == 10, crm == {c0, c1, c4, c8} */
2923 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */
2924 };
2925
2926 if (crn >= 9 && crn <= 11) {
2927 return (mask[crn - 9] >> crm) & 1;
2928 }
2929 return false;
2930 }
2931
2932 static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
2933 int opc1, int crn, int crm, int opc2,
2934 bool isread, int rt, int rt2)
2935 {
2936 uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
2937 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2938 TCGv_ptr tcg_ri = NULL;
2939 bool need_exit_tb = false;
2940 uint32_t syndrome;
2941
2942 /*
2943 * Note that since we are an implementation which takes an
2944 * exception on a trapped conditional instruction only if the
2945 * instruction passes its condition code check, we can take
2946 * advantage of the clause in the ARM ARM that allows us to set
2947 * the COND field in the instruction to 0xE in all cases.
2948 * We could fish the actual condition out of the insn (ARM)
2949 * or the condexec bits (Thumb) but it isn't necessary.
2950 */
2951 switch (cpnum) {
2952 case 14:
2953 if (is64) {
2954 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2955 isread, false);
2956 } else {
2957 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2958 rt, isread, false);
2959 }
2960 break;
2961 case 15:
2962 if (is64) {
2963 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2964 isread, false);
2965 } else {
2966 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2967 rt, isread, false);
2968 }
2969 break;
2970 default:
2971 /*
2972 * ARMv8 defines that only coprocessors 14 and 15 exist,
2973 * so this can only happen if this is an ARMv7 or earlier CPU,
2974 * in which case the syndrome information won't actually be
2975 * guest visible.
2976 */
2977 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
2978 syndrome = syn_uncategorized();
2979 break;
2980 }
2981
2982 if (s->hstr_active && cpnum == 15 && s->current_el == 1) {
2983 /*
2984 * At EL1, check for a HSTR_EL2 trap, which must take precedence
2985 * over the UNDEF for "no such register" or the UNDEF for "access
2986 * permissions forbid this EL1 access". HSTR_EL2 traps from EL0
2987 * only happen if the cpreg doesn't UNDEF at EL0, so we do those in
2988 * access_check_cp_reg(), after the checks for whether the access
2989 * configurably trapped to EL1.
2990 */
2991 uint32_t maskbit = is64 ? crm : crn;
2992
2993 if (maskbit != 4 && maskbit != 14) {
2994 /* T4 and T14 are RES0 so never cause traps */
2995 TCGv_i32 t;
2996 DisasLabel over = gen_disas_label(s);
2997
2998 t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
2999 tcg_gen_andi_i32(t, t, 1u << maskbit);
3000 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
3001
3002 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
3003 /*
3004 * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
3005 * but since we're conditionally branching over it, we want
3006 * to assume continue-to-next-instruction.
3007 */
3008 s->base.is_jmp = DISAS_NEXT;
3009 set_disas_label(s, over);
3010 }
3011 }
3012
3013 if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
3014 /*
3015 * Check for TIDCP trap, which must take precedence over the UNDEF
3016 * for "no such register" etc. It shares precedence with HSTR,
3017 * but raises the same exception, so order doesn't matter.
3018 */
3019 switch (s->current_el) {
3020 case 0:
3021 if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
3022 && dc_isar_feature(aa64_tidcp1, s)) {
3023 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
3024 }
3025 break;
3026 case 1:
3027 gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
3028 break;
3029 }
3030 }
3031
3032 if (!ri) {
3033 /*
3034 * Unknown register; this might be a guest error or a QEMU
3035 * unimplemented feature.
3036 */
3037 if (is64) {
3038 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3039 "64 bit system register cp:%d opc1: %d crm:%d "
3040 "(%s)\n",
3041 isread ? "read" : "write", cpnum, opc1, crm,
3042 s->ns ? "non-secure" : "secure");
3043 } else {
3044 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3045 "system register cp:%d opc1:%d crn:%d crm:%d "
3046 "opc2:%d (%s)\n",
3047 isread ? "read" : "write", cpnum, opc1, crn,
3048 crm, opc2, s->ns ? "non-secure" : "secure");
3049 }
3050 unallocated_encoding(s);
3051 return;
3052 }
3053
3054 /* Check access permissions */
3055 if (!cp_access_ok(s->current_el, ri, isread)) {
3056 unallocated_encoding(s);
3057 return;
3058 }
3059
3060 if ((s->hstr_active && s->current_el == 0) || ri->accessfn ||
3061 (ri->fgt && s->fgt_active) ||
3062 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
3063 /*
3064 * Emit code to perform further access permissions checks at
3065 * runtime; this may result in an exception.
3066 * Note that on XScale all cp0..c13 registers do an access check
3067 * call in order to handle c15_cpar.
3068 */
3069 gen_set_condexec(s);
3070 gen_update_pc(s, 0);
3071 tcg_ri = tcg_temp_new_ptr();
3072 gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
3073 tcg_constant_i32(key),
3074 tcg_constant_i32(syndrome),
3075 tcg_constant_i32(isread));
3076 } else if (ri->type & ARM_CP_RAISES_EXC) {
3077 /*
3078 * The readfn or writefn might raise an exception;
3079 * synchronize the CPU state in case it does.
3080 */
3081 gen_set_condexec(s);
3082 gen_update_pc(s, 0);
3083 }
3084
3085 /* Handle special cases first */
3086 switch (ri->type & ARM_CP_SPECIAL_MASK) {
3087 case 0:
3088 break;
3089 case ARM_CP_NOP:
3090 return;
3091 case ARM_CP_WFI:
3092 if (isread) {
3093 unallocated_encoding(s);
3094 } else {
3095 gen_update_pc(s, curr_insn_len(s));
3096 s->base.is_jmp = DISAS_WFI;
3097 }
3098 return;
3099 default:
3100 g_assert_not_reached();
3101 }
3102
3103 if (ri->type & ARM_CP_IO) {
3104 /* I/O operations must end the TB here (whether read or write) */
3105 need_exit_tb = translator_io_start(&s->base);
3106 }
3107
3108 if (isread) {
3109 /* Read */
3110 if (is64) {
3111 TCGv_i64 tmp64;
3112 TCGv_i32 tmp;
3113 if (ri->type & ARM_CP_CONST) {
3114 tmp64 = tcg_constant_i64(ri->resetvalue);
3115 } else if (ri->readfn) {
3116 if (!tcg_ri) {
3117 tcg_ri = gen_lookup_cp_reg(key);
3118 }
3119 tmp64 = tcg_temp_new_i64();
3120 gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
3121 } else {
3122 tmp64 = tcg_temp_new_i64();
3123 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
3124 }
3125 tmp = tcg_temp_new_i32();
3126 tcg_gen_extrl_i64_i32(tmp, tmp64);
3127 store_reg(s, rt, tmp);
3128 tmp = tcg_temp_new_i32();
3129 tcg_gen_extrh_i64_i32(tmp, tmp64);
3130 store_reg(s, rt2, tmp);
3131 } else {
3132 TCGv_i32 tmp;
3133 if (ri->type & ARM_CP_CONST) {
3134 tmp = tcg_constant_i32(ri->resetvalue);
3135 } else if (ri->readfn) {
3136 if (!tcg_ri) {
3137 tcg_ri = gen_lookup_cp_reg(key);
3138 }
3139 tmp = tcg_temp_new_i32();
3140 gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
3141 } else {
3142 tmp = load_cpu_offset(ri->fieldoffset);
3143 }
3144 if (rt == 15) {
3145             /* A destination register of r15 for a 32-bit load sets
3146              * the condition codes from the high 4 bits of the value.
3147              */
3148 gen_set_nzcv(tmp);
3149 } else {
3150 store_reg(s, rt, tmp);
3151 }
3152 }
3153 } else {
3154 /* Write */
3155 if (ri->type & ARM_CP_CONST) {
3156 /* If not forbidden by access permissions, treat as WI */
3157 return;
3158 }
3159
3160 if (is64) {
3161 TCGv_i32 tmplo, tmphi;
3162 TCGv_i64 tmp64 = tcg_temp_new_i64();
3163 tmplo = load_reg(s, rt);
3164 tmphi = load_reg(s, rt2);
3165 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
3166 if (ri->writefn) {
3167 if (!tcg_ri) {
3168 tcg_ri = gen_lookup_cp_reg(key);
3169 }
3170 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
3171 } else {
3172 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
3173 }
3174 } else {
3175 TCGv_i32 tmp = load_reg(s, rt);
3176 if (ri->writefn) {
3177 if (!tcg_ri) {
3178 tcg_ri = gen_lookup_cp_reg(key);
3179 }
3180 gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
3181 } else {
3182 store_cpu_offset(tmp, ri->fieldoffset, 4);
3183 }
3184 }
3185 }
3186
3187 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
3188 /*
3189 * A write to any coprocessor register that ends a TB
3190 * must rebuild the hflags for the next TB.
3191 */
3192 gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
3193 /*
3194 * We default to ending the TB on a coprocessor register write,
3195 * but allow this to be suppressed by the register definition
3196 * (usually only necessary to work around guest bugs).
3197 */
3198 need_exit_tb = true;
3199 }
3200 if (need_exit_tb) {
3201 gen_lookup_tb(s);
3202 }
3203 }
3204
3205 /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
3206 static void disas_xscale_insn(DisasContext *s, uint32_t insn)
3207 {
3208 int cpnum = (insn >> 8) & 0xf;
3209
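    /* The XScale Coprocessor Access Register (c15_cpar) gates access
     * to each coprocessor; if the bit for this one is clear, UNDEF.
     */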
3210 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
3211 unallocated_encoding(s);
3212 } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
3213 if (disas_iwmmxt_insn(s, insn)) {
3214 unallocated_encoding(s);
3215 }
3216 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
3217 if (disas_dsp_insn(s, insn)) {
3218 unallocated_encoding(s);
3219 }
3220 }
3221 }
3222
3223 /* Store a 64-bit value to a register pair. Clobbers val. */
3224 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
3225 {
3226 TCGv_i32 tmp;
3227 tmp = tcg_temp_new_i32();
3228 tcg_gen_extrl_i64_i32(tmp, val);
3229 store_reg(s, rlow, tmp);
3230 tmp = tcg_temp_new_i32();
3231 tcg_gen_extrh_i64_i32(tmp, val);
3232 store_reg(s, rhigh, tmp);
3233 }
3234
3235 /* Load and add a 64-bit value from a register pair. */
3236 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
3237 {
3238 TCGv_i64 tmp;
3239 TCGv_i32 tmpl;
3240 TCGv_i32 tmph;
3241
3242 /* Load 64-bit value rd:rn. */
3243 tmpl = load_reg(s, rlow);
3244 tmph = load_reg(s, rhigh);
3245 tmp = tcg_temp_new_i64();
3246 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
3247 tcg_gen_add_i64(val, val, tmp);
3248 }
3249
3250 /* Set N and Z flags from hi|lo. */
3251 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
3252 {
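    /* cpu_NF holds the sign in bit 31 and Z is set when cpu_ZF == 0,
     * so N comes from the high word and Z from the OR of both halves.
     */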
3253 tcg_gen_mov_i32(cpu_NF, hi);
3254 tcg_gen_or_i32(cpu_ZF, lo, hi);
3255 }
3256
3257 /* Load/Store exclusive instructions are implemented by remembering
3258 the value/address loaded, and seeing if these are the same
3259 when the store is performed. This should be sufficient to implement
3260 the architecturally mandated semantics, and avoids having to monitor
3261 regular stores. The compare vs the remembered value is done during
3262 the cmpxchg operation, but we must compare the addresses manually. */
3263 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
3264 TCGv_i32 addr, int size)
3265 {
3266 TCGv_i32 tmp = tcg_temp_new_i32();
3267 MemOp opc = size | MO_ALIGN | s->be_data;
3268
3269 s->is_ldex = true;
3270
3271 if (size == 3) {
3272 TCGv_i32 tmp2 = tcg_temp_new_i32();
3273 TCGv_i64 t64 = tcg_temp_new_i64();
3274
3275 /*
3276 * For AArch32, architecturally the 32-bit word at the lowest
3277 * address is always Rt and the one at addr+4 is Rt2, even if
3278 * the CPU is big-endian. That means we don't want to do a
3279 * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
3280 * architecturally 64-bit access, but instead do a 64-bit access
3281 * using MO_BE if appropriate and then split the two halves.
3282 */
3283 TCGv taddr = gen_aa32_addr(s, addr, opc);
3284
3285 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
3286 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3287 if (s->be_data == MO_BE) {
3288 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
3289 } else {
3290 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
3291 }
3292 store_reg(s, rt2, tmp2);
3293 } else {
3294 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
3295 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
3296 }
3297
3298 store_reg(s, rt, tmp);
3299 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
3300 }
3301
3302 static void gen_clrex(DisasContext *s)
3303 {
3304 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3305 }
3306
3307 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
3308 TCGv_i32 addr, int size)
3309 {
3310 TCGv_i32 t0, t1, t2;
3311 TCGv_i64 extaddr;
3312 TCGv taddr;
3313 TCGLabel *done_label;
3314 TCGLabel *fail_label;
3315 MemOp opc = size | MO_ALIGN | s->be_data;
3316
3317 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
3318 [addr] = {Rt};
3319 {Rd} = 0;
3320 } else {
3321 {Rd} = 1;
3322 } */
3323 fail_label = gen_new_label();
3324 done_label = gen_new_label();
3325 extaddr = tcg_temp_new_i64();
3326 tcg_gen_extu_i32_i64(extaddr, addr);
3327 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
3328
3329 taddr = gen_aa32_addr(s, addr, opc);
3330 t0 = tcg_temp_new_i32();
3331 t1 = load_reg(s, rt);
3332 if (size == 3) {
3333 TCGv_i64 o64 = tcg_temp_new_i64();
3334 TCGv_i64 n64 = tcg_temp_new_i64();
3335
3336 t2 = load_reg(s, rt2);
3337
3338 /*
3339 * For AArch32, architecturally the 32-bit word at the lowest
3340 * address is always Rt and the one at addr+4 is Rt2, even if
3341 * the CPU is big-endian. Since we're going to treat this as a
3342 * single 64-bit BE store, we need to put the two halves in the
3343 * opposite order for BE to LE, so that they end up in the right
3344 * places. We don't want gen_aa32_st_i64, because that checks
3345 * SCTLR_B as if for an architectural 64-bit access.
3346 */
3347 if (s->be_data == MO_BE) {
3348 tcg_gen_concat_i32_i64(n64, t2, t1);
3349 } else {
3350 tcg_gen_concat_i32_i64(n64, t1, t2);
3351 }
3352
3353 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
3354 get_mem_index(s), opc);
3355
3356 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
3357 tcg_gen_extrl_i64_i32(t0, o64);
3358 } else {
3359 t2 = tcg_temp_new_i32();
3360 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
3361 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
3362 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
3363 }
3364 tcg_gen_mov_i32(cpu_R[rd], t0);
3365 tcg_gen_br(done_label);
3366
3367 gen_set_label(fail_label);
3368 tcg_gen_movi_i32(cpu_R[rd], 1);
3369 gen_set_label(done_label);
3370 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3371 }
3372
3373 /* gen_srs:
3374 * @env: CPUARMState
3375 * @s: DisasContext
3376 * @mode: mode field from insn (which stack to store to)
3377 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
3378 * @writeback: true if writeback bit set
3379 *
3380 * Generate code for the SRS (Store Return State) insn.
3381 */
3382 static void gen_srs(DisasContext *s,
3383 uint32_t mode, uint32_t amode, bool writeback)
3384 {
3385 int32_t offset;
3386 TCGv_i32 addr, tmp;
3387 bool undef = false;
3388
3389 /* SRS is:
3390 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
3391 * and specified mode is monitor mode
3392 * - UNDEFINED in Hyp mode
3393 * - UNPREDICTABLE in User or System mode
3394 * - UNPREDICTABLE if the specified mode is:
3395 * -- not implemented
3396 * -- not a valid mode number
3397 * -- a mode that's at a higher exception level
3398 * -- Monitor, if we are Non-secure
3399 * For the UNPREDICTABLE cases we choose to UNDEF.
3400 */
3401 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
3402 gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
3403 return;
3404 }
3405
3406 if (s->current_el == 0 || s->current_el == 2) {
3407 undef = true;
3408 }
3409
3410 switch (mode) {
3411 case ARM_CPU_MODE_USR:
3412 case ARM_CPU_MODE_FIQ:
3413 case ARM_CPU_MODE_IRQ:
3414 case ARM_CPU_MODE_SVC:
3415 case ARM_CPU_MODE_ABT:
3416 case ARM_CPU_MODE_UND:
3417 case ARM_CPU_MODE_SYS:
3418 break;
3419 case ARM_CPU_MODE_HYP:
3420 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3421 undef = true;
3422 }
3423 break;
3424 case ARM_CPU_MODE_MON:
3425 /* No need to check specifically for "are we non-secure" because
3426 * we've already made EL0 UNDEF and handled the trap for S-EL1;
3427 * so if this isn't EL3 then we must be non-secure.
3428 */
3429 if (s->current_el != 3) {
3430 undef = true;
3431 }
3432 break;
3433 default:
3434 undef = true;
3435 }
3436
3437 if (undef) {
3438 unallocated_encoding(s);
3439 return;
3440 }
3441
3442 addr = tcg_temp_new_i32();
3443 /* get_r13_banked() will raise an exception if called from System mode */
3444 gen_set_condexec(s);
3445 gen_update_pc(s, 0);
3446 gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
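    /* Two words (LR, then SPSR) are stored; offset is the address of
     * the lower-addressed one relative to the banked SP for each
     * addressing mode.
     */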
3447 switch (amode) {
3448 case 0: /* DA */
3449 offset = -4;
3450 break;
3451 case 1: /* IA */
3452 offset = 0;
3453 break;
3454 case 2: /* DB */
3455 offset = -8;
3456 break;
3457 case 3: /* IB */
3458 offset = 4;
3459 break;
3460 default:
3461 g_assert_not_reached();
3462 }
3463 tcg_gen_addi_i32(addr, addr, offset);
3464 tmp = load_reg(s, 14);
3465 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3466 tmp = load_cpu_field(spsr);
3467 tcg_gen_addi_i32(addr, addr, 4);
3468 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3469 if (writeback) {
3470 switch (amode) {
3471 case 0:
3472 offset = -8;
3473 break;
3474 case 1:
3475 offset = 4;
3476 break;
3477 case 2:
3478 offset = -4;
3479 break;
3480 case 3:
3481 offset = 0;
3482 break;
3483 default:
3484 g_assert_not_reached();
3485 }
3486 tcg_gen_addi_i32(addr, addr, offset);
3487 gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
3488 }
3489 s->base.is_jmp = DISAS_UPDATE_EXIT;
3490 }
3491
3492 /* Skip this instruction if the ARM condition is false */
3493 static void arm_skip_unless(DisasContext *s, uint32_t cond)
3494 {
3495 arm_gen_condlabel(s);
3496 arm_gen_test_cc(cond ^ 1, s->condlabel.label);
3497 }
3498
3499
3500 /*
3501 * Constant expanders used by T16/T32 decode
3502 */
3503
3504 /* Return only the rotation part of T32ExpandImm. */
3505 static int t32_expandimm_rot(DisasContext *s, int x)
3506 {
3507 return x & 0xc00 ? extract32(x, 7, 5) : 0;
3508 }
3509
3510 /* Return the unrotated immediate from T32ExpandImm. */
3511 static int t32_expandimm_imm(DisasContext *s, int x)
3512 {
3513 int imm = extract32(x, 0, 8);
3514
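    /* For example, the 12-bit encoding 0x2AB selects pattern 2 below
     * and expands to the immediate 0xAB00AB00 (with no rotation).
     */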
3515 switch (extract32(x, 8, 4)) {
3516 case 0: /* XY */
3517 /* Nothing to do. */
3518 break;
3519 case 1: /* 00XY00XY */
3520 imm *= 0x00010001;
3521 break;
3522 case 2: /* XY00XY00 */
3523 imm *= 0x01000100;
3524 break;
3525 case 3: /* XYXYXYXY */
3526 imm *= 0x01010101;
3527 break;
3528 default:
3529 /* Rotated constant. */
3530 imm |= 0x80;
3531 break;
3532 }
3533 return imm;
3534 }
3535
3536 static int t32_branch24(DisasContext *s, int x)
3537 {
3538 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
3539 x ^= !(x < 0) * (3 << 21);
3540 /* Append the final zero. */
3541 return x << 1;
3542 }
3543
3544 static int t16_setflags(DisasContext *s)
3545 {
3546 return s->condexec_mask == 0;
3547 }
3548
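/* For T16 PUSH/POP, bit 8 of the encoded register list stands for LR
 * (r14) on PUSH and PC (r15) on POP; fold it into the full list.
 */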
3549 static int t16_push_list(DisasContext *s, int x)
3550 {
3551 return (x & 0xff) | (x & 0x100) << (14 - 8);
3552 }
3553
3554 static int t16_pop_list(DisasContext *s, int x)
3555 {
3556 return (x & 0xff) | (x & 0x100) << (15 - 8);
3557 }
3558
3559 /*
3560 * Include the generated decoders.
3561 */
3562
3563 #include "decode-a32.c.inc"
3564 #include "decode-a32-uncond.c.inc"
3565 #include "decode-t32.c.inc"
3566 #include "decode-t16.c.inc"
3567
3568 static bool valid_cp(DisasContext *s, int cp)
3569 {
3570 /*
3571 * Return true if this coprocessor field indicates something
3572 * that's really a possible coprocessor.
3573 * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
3574 * and of those only cp14 and cp15 were used for registers.
3575 * cp10 and cp11 were used for VFP and Neon, whose decode is
3576 * dealt with elsewhere. With the advent of fp16, cp9 is also
3577 * now part of VFP.
3578 * For v8A and later, the encoding has been tightened so that
3579 * only cp14 and cp15 are valid, and other values aren't considered
3580 * to be in the coprocessor-instruction space at all. v8M still
3581 * permits coprocessors 0..7.
3582 * For XScale, we must not decode the XScale cp0, cp1 space as
3583 * a standard coprocessor insn, because we want to fall through to
3584 * the legacy disas_xscale_insn() decoder after decodetree is done.
3585 */
3586 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
3587 return false;
3588 }
3589
3590 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
3591 !arm_dc_feature(s, ARM_FEATURE_M)) {
3592 return cp >= 14;
3593 }
3594 return cp < 8 || cp >= 14;
3595 }
3596
3597 static bool trans_MCR(DisasContext *s, arg_MCR *a)
3598 {
3599 if (!valid_cp(s, a->cp)) {
3600 return false;
3601 }
3602 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3603 false, a->rt, 0);
3604 return true;
3605 }
3606
3607 static bool trans_MRC(DisasContext *s, arg_MRC *a)
3608 {
3609 if (!valid_cp(s, a->cp)) {
3610 return false;
3611 }
3612 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3613 true, a->rt, 0);
3614 return true;
3615 }
3616
3617 static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
3618 {
3619 if (!valid_cp(s, a->cp)) {
3620 return false;
3621 }
3622 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3623 false, a->rt, a->rt2);
3624 return true;
3625 }
3626
3627 static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
3628 {
3629 if (!valid_cp(s, a->cp)) {
3630 return false;
3631 }
3632 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3633 true, a->rt, a->rt2);
3634 return true;
3635 }
3636
3637 /* Helpers to swap operands for reverse-subtract. */
3638 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3639 {
3640 tcg_gen_sub_i32(dst, b, a);
3641 }
3642
3643 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3644 {
3645 gen_sub_CC(dst, b, a);
3646 }
3647
3648 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3649 {
3650 gen_sub_carry(dest, b, a);
3651 }
3652
3653 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3654 {
3655 gen_sbc_CC(dest, b, a);
3656 }
3657
3658 /*
3659 * Helpers for the data processing routines.
3660 *
3661  * After the computation, store the result back.  This may be
3662  * suppressed entirely (STREG_NONE), require a runtime check against
3663  * the stack limits (STREG_SP_CHECK), generate an exception return
3664  * (STREG_EXC_RET), or simply write the destination register (STREG_NORMAL).
3665 *
3666 * Always return true, indicating success for a trans_* function.
3667 */
3668 typedef enum {
3669 STREG_NONE,
3670 STREG_NORMAL,
3671 STREG_SP_CHECK,
3672 STREG_EXC_RET,
3673 } StoreRegKind;
3674
3675 static bool store_reg_kind(DisasContext *s, int rd,
3676 TCGv_i32 val, StoreRegKind kind)
3677 {
3678 switch (kind) {
3679 case STREG_NONE:
3680 return true;
3681 case STREG_NORMAL:
3682 /* See ALUWritePC: Interworking only from a32 mode. */
3683 if (s->thumb) {
3684 store_reg(s, rd, val);
3685 } else {
3686 store_reg_bx(s, rd, val);
3687 }
3688 return true;
3689 case STREG_SP_CHECK:
3690 store_sp_checked(s, val);
3691 return true;
3692 case STREG_EXC_RET:
3693 gen_exception_return(s, val);
3694 return true;
3695 }
3696 g_assert_not_reached();
3697 }
3698
3699 /*
3700 * Data Processing (register)
3701 *
3702 * Operate, with set flags, one register source,
3703 * one immediate shifted register source, and a destination.
3704 */
3705 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
3706 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3707 int logic_cc, StoreRegKind kind)
3708 {
3709 TCGv_i32 tmp1, tmp2;
3710
3711 tmp2 = load_reg(s, a->rm);
3712 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
3713 tmp1 = load_reg(s, a->rn);
3714
3715 gen(tmp1, tmp1, tmp2);
3716
3717 if (logic_cc) {
3718 gen_logic_CC(tmp1);
3719 }
3720 return store_reg_kind(s, a->rd, tmp1, kind);
3721 }
3722
3723 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
3724 void (*gen)(TCGv_i32, TCGv_i32),
3725 int logic_cc, StoreRegKind kind)
3726 {
3727 TCGv_i32 tmp;
3728
3729 tmp = load_reg(s, a->rm);
3730 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
3731
3732 gen(tmp, tmp);
3733 if (logic_cc) {
3734 gen_logic_CC(tmp);
3735 }
3736 return store_reg_kind(s, a->rd, tmp, kind);
3737 }
3738
3739 /*
3740 * Data-processing (register-shifted register)
3741 *
3742 * Operate, with set flags, one register source,
3743 * one register shifted register source, and a destination.
3744 */
3745 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
3746 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3747 int logic_cc, StoreRegKind kind)
3748 {
3749 TCGv_i32 tmp1, tmp2;
3750
3751 tmp1 = load_reg(s, a->rs);
3752 tmp2 = load_reg(s, a->rm);
3753 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3754 tmp1 = load_reg(s, a->rn);
3755
3756 gen(tmp1, tmp1, tmp2);
3757
3758 if (logic_cc) {
3759 gen_logic_CC(tmp1);
3760 }
3761 return store_reg_kind(s, a->rd, tmp1, kind);
3762 }
3763
3764 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
3765 void (*gen)(TCGv_i32, TCGv_i32),
3766 int logic_cc, StoreRegKind kind)
3767 {
3768 TCGv_i32 tmp1, tmp2;
3769
3770 tmp1 = load_reg(s, a->rs);
3771 tmp2 = load_reg(s, a->rm);
3772 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3773
3774 gen(tmp2, tmp2);
3775 if (logic_cc) {
3776 gen_logic_CC(tmp2);
3777 }
3778 return store_reg_kind(s, a->rd, tmp2, kind);
3779 }
3780
3781 /*
3782 * Data-processing (immediate)
3783 *
3784 * Operate, with set flags, one register source,
3785 * one rotated immediate, and a destination.
3786 *
3787 * Note that logic_cc && a->rot setting CF based on the msb of the
3788 * immediate is the reason why we must pass in the unrotated form
3789 * of the immediate.
3790 */
3791 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
3792 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3793 int logic_cc, StoreRegKind kind)
3794 {
3795 TCGv_i32 tmp1;
3796 uint32_t imm;
3797
3798 imm = ror32(a->imm, a->rot);
3799 if (logic_cc && a->rot) {
3800 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3801 }
3802 tmp1 = load_reg(s, a->rn);
3803
3804 gen(tmp1, tmp1, tcg_constant_i32(imm));
3805
3806 if (logic_cc) {
3807 gen_logic_CC(tmp1);
3808 }
3809 return store_reg_kind(s, a->rd, tmp1, kind);
3810 }
3811
3812 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
3813 void (*gen)(TCGv_i32, TCGv_i32),
3814 int logic_cc, StoreRegKind kind)
3815 {
3816 TCGv_i32 tmp;
3817 uint32_t imm;
3818
3819 imm = ror32(a->imm, a->rot);
3820 if (logic_cc && a->rot) {
3821 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3822 }
3823
3824 tmp = tcg_temp_new_i32();
3825 gen(tmp, tcg_constant_i32(imm));
3826
3827 if (logic_cc) {
3828 gen_logic_CC(tmp);
3829 }
3830 return store_reg_kind(s, a->rd, tmp, kind);
3831 }
3832
3833 #define DO_ANY3(NAME, OP, L, K) \
3834 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
3835 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
3836 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
3837 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
3838 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
3839 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
3840
3841 #define DO_ANY2(NAME, OP, L, K) \
3842 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
3843 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
3844 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
3845 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
3846 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
3847 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
3848
3849 #define DO_CMP2(NAME, OP, L) \
3850 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
3851 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
3852 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
3853 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
3854 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
3855 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
3856
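/*
 * As an illustration of the macros above,
 * DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL) expands to
 * trans_AND_rrri, trans_AND_rrrr and trans_AND_rri, which forward to
 * op_s_rrr_shi, op_s_rrr_shr and op_s_rri_rot respectively with
 * logic_cc = a->s; DO_ANY2 and DO_CMP2 follow the same pattern for the
 * single-source and compare forms.
 */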
3857 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3858 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3859 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3860 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3861
3862 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3863 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3864 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3865 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3866
3867 DO_CMP2(TST, tcg_gen_and_i32, true)
3868 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
3869 DO_CMP2(CMN, gen_add_CC, false)
3870 DO_CMP2(CMP, gen_sub_CC, false)
3871
3872 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
3873 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
3874
3875 /*
3876 * Note for the computation of StoreRegKind we return out of the
3877 * middle of the functions that are expanded by DO_ANY3, and that
3878 * we modify a->s via that parameter before it is used by OP.
3879 */
3880 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
3881 ({
3882 StoreRegKind ret = STREG_NORMAL;
3883 if (a->rd == 15 && a->s) {
3884 /*
3885 * See ALUExceptionReturn:
3886 * In User mode, UNPREDICTABLE; we choose UNDEF.
3887 * In Hyp mode, UNDEFINED.
3888 */
3889 if (IS_USER(s) || s->current_el == 2) {
3890 unallocated_encoding(s);
3891 return true;
3892 }
3893 /* There is no writeback of nzcv to PSTATE. */
3894 a->s = 0;
3895 ret = STREG_EXC_RET;
3896 } else if (a->rd == 13 && a->rn == 13) {
3897 ret = STREG_SP_CHECK;
3898 }
3899 ret;
3900 }))
3901
3902 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3903 ({
3904 StoreRegKind ret = STREG_NORMAL;
3905 if (a->rd == 15 && a->s) {
3906 /*
3907 * See ALUExceptionReturn:
3908 * In User mode, UNPREDICTABLE; we choose UNDEF.
3909 * In Hyp mode, UNDEFINED.
3910 */
3911 if (IS_USER(s) || s->current_el == 2) {
3912 unallocated_encoding(s);
3913 return true;
3914 }
3915 /* There is no writeback of nzcv to PSTATE. */
3916 a->s = 0;
3917 ret = STREG_EXC_RET;
3918 } else if (a->rd == 13) {
3919 ret = STREG_SP_CHECK;
3920 }
3921 ret;
3922 }))
3923
3924 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3925
3926 /*
3927 * ORN is only available with T32, so there is no register-shifted-register
3928 * form of the insn. Using the DO_ANY3 macro would create an unused function.
3929 */
3930 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
3931 {
3932 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3933 }
3934
3935 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
3936 {
3937 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3938 }
3939
3940 #undef DO_ANY3
3941 #undef DO_ANY2
3942 #undef DO_CMP2
3943
3944 static bool trans_ADR(DisasContext *s, arg_ri *a)
3945 {
3946 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
3947 return true;
3948 }
3949
3950 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
3951 {
3952 if (!ENABLE_ARCH_6T2) {
3953 return false;
3954 }
3955
3956 store_reg(s, a->rd, tcg_constant_i32(a->imm));
3957 return true;
3958 }
3959
3960 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
3961 {
3962 TCGv_i32 tmp;
3963
3964 if (!ENABLE_ARCH_6T2) {
3965 return false;
3966 }
3967
3968 tmp = load_reg(s, a->rd);
3969 tcg_gen_ext16u_i32(tmp, tmp);
3970 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
3971 store_reg(s, a->rd, tmp);
3972 return true;
3973 }
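/*
 * MOVW/MOVT above are designed to be used as a pair: MOVW loads a
 * 16-bit constant and zeroes the upper half, while MOVT replaces only
 * bits [31:16], so "movw rd, #lo; movt rd, #hi" builds an arbitrary
 * 32-bit constant.
 */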
3974
3975 /*
3976 * v8.1M MVE wide-shifts
3977 */
3978 static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
3979 WideShiftImmFn *fn)
3980 {
3981 TCGv_i64 rda;
3982 TCGv_i32 rdalo, rdahi;
3983
3984 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
3985 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
3986 return false;
3987 }
3988 if (a->rdahi == 15) {
3989 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
3990 return false;
3991 }
3992 if (!dc_isar_feature(aa32_mve, s) ||
3993 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
3994 a->rdahi == 13) {
3995 /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
3996 unallocated_encoding(s);
3997 return true;
3998 }
3999
4000 if (a->shim == 0) {
4001 a->shim = 32;
4002 }
4003
4004 rda = tcg_temp_new_i64();
4005 rdalo = load_reg(s, a->rdalo);
4006 rdahi = load_reg(s, a->rdahi);
4007 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4008
4009 fn(rda, rda, a->shim);
4010
4011 tcg_gen_extrl_i64_i32(rdalo, rda);
4012 tcg_gen_extrh_i64_i32(rdahi, rda);
4013 store_reg(s, a->rdalo, rdalo);
4014 store_reg(s, a->rdahi, rdahi);
4015
4016 return true;
4017 }
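/*
 * Note for do_mve_shl_ri above: the 64-bit operand is the concatenation
 * RdaHi:RdaLo, and an immediate shift count of 32 is encoded as 0,
 * hence the shim == 0 fixup before calling the WideShiftImmFn.
 */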
4018
4019 static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4020 {
4021 return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
4022 }
4023
4024 static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4025 {
4026 return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
4027 }
4028
4029 static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4030 {
4031 return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
4032 }
4033
4034 static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4035 {
4036 gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
4037 }
4038
4039 static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4040 {
4041 return do_mve_shl_ri(s, a, gen_mve_sqshll);
4042 }
4043
4044 static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4045 {
4046 gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
4047 }
4048
4049 static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4050 {
4051 return do_mve_shl_ri(s, a, gen_mve_uqshll);
4052 }
4053
4054 static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4055 {
4056 return do_mve_shl_ri(s, a, gen_srshr64_i64);
4057 }
4058
4059 static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4060 {
4061 return do_mve_shl_ri(s, a, gen_urshr64_i64);
4062 }
4063
4064 static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
4065 {
4066 TCGv_i64 rda;
4067 TCGv_i32 rdalo, rdahi;
4068
4069 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4070 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4071 return false;
4072 }
4073 if (a->rdahi == 15) {
4074 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
4075 return false;
4076 }
4077 if (!dc_isar_feature(aa32_mve, s) ||
4078 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4079 a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
4080 a->rm == a->rdahi || a->rm == a->rdalo) {
4081 /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
4082 unallocated_encoding(s);
4083 return true;
4084 }
4085
4086 rda = tcg_temp_new_i64();
4087 rdalo = load_reg(s, a->rdalo);
4088 rdahi = load_reg(s, a->rdahi);
4089 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4090
4091 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4092 fn(rda, tcg_env, rda, cpu_R[a->rm]);
4093
4094 tcg_gen_extrl_i64_i32(rdalo, rda);
4095 tcg_gen_extrh_i64_i32(rdahi, rda);
4096 store_reg(s, a->rdalo, rdalo);
4097 store_reg(s, a->rdahi, rdahi);
4098
4099 return true;
4100 }
4101
4102 static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
4103 {
4104 return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
4105 }
4106
4107 static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
4108 {
4109 return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
4110 }
4111
4112 static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4113 {
4114 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
4115 }
4116
4117 static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4118 {
4119 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
4120 }
4121
4122 static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4123 {
4124 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
4125 }
4126
4127 static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4128 {
4129 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
4130 }
4131
4132 static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
4133 {
4134 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4135 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4136 return false;
4137 }
4138 if (!dc_isar_feature(aa32_mve, s) ||
4139 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4140 a->rda == 13 || a->rda == 15) {
4141 /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
4142 unallocated_encoding(s);
4143 return true;
4144 }
4145
4146 if (a->shim == 0) {
4147 a->shim = 32;
4148 }
4149 fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
4150
4151 return true;
4152 }
4153
4154 static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4155 {
4156 return do_mve_sh_ri(s, a, gen_urshr32_i32);
4157 }
4158
4159 static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4160 {
4161 return do_mve_sh_ri(s, a, gen_srshr32_i32);
4162 }
4163
4164 static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4165 {
4166 gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
4167 }
4168
4169 static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4170 {
4171 return do_mve_sh_ri(s, a, gen_mve_sqshl);
4172 }
4173
4174 static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4175 {
4176 gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
4177 }
4178
4179 static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4180 {
4181 return do_mve_sh_ri(s, a, gen_mve_uqshl);
4182 }
4183
4184 static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
4185 {
4186 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4187 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4188 return false;
4189 }
4190 if (!dc_isar_feature(aa32_mve, s) ||
4191 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4192 a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
4193 a->rm == a->rda) {
4194 /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
4195 unallocated_encoding(s);
4196 return true;
4197 }
4198
4199 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4200 fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
4201 return true;
4202 }
4203
4204 static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
4205 {
4206 return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
4207 }
4208
4209 static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
4210 {
4211 return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
4212 }
4213
4214 /*
4215 * Multiply and multiply accumulate
4216 */
4217
4218 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
4219 {
4220 TCGv_i32 t1, t2;
4221
4222 t1 = load_reg(s, a->rn);
4223 t2 = load_reg(s, a->rm);
4224 tcg_gen_mul_i32(t1, t1, t2);
4225 if (add) {
4226 t2 = load_reg(s, a->ra);
4227 tcg_gen_add_i32(t1, t1, t2);
4228 }
4229 if (a->s) {
4230 gen_logic_CC(t1);
4231 }
4232 store_reg(s, a->rd, t1);
4233 return true;
4234 }
4235
4236 static bool trans_MUL(DisasContext *s, arg_MUL *a)
4237 {
4238 return op_mla(s, a, false);
4239 }
4240
4241 static bool trans_MLA(DisasContext *s, arg_MLA *a)
4242 {
4243 return op_mla(s, a, true);
4244 }
4245
4246 static bool trans_MLS(DisasContext *s, arg_MLS *a)
4247 {
4248 TCGv_i32 t1, t2;
4249
4250 if (!ENABLE_ARCH_6T2) {
4251 return false;
4252 }
4253 t1 = load_reg(s, a->rn);
4254 t2 = load_reg(s, a->rm);
4255 tcg_gen_mul_i32(t1, t1, t2);
4256 t2 = load_reg(s, a->ra);
4257 tcg_gen_sub_i32(t1, t2, t1);
4258 store_reg(s, a->rd, t1);
4259 return true;
4260 }
4261
4262 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
4263 {
4264 TCGv_i32 t0, t1, t2, t3;
4265
4266 t0 = load_reg(s, a->rm);
4267 t1 = load_reg(s, a->rn);
4268 if (uns) {
4269 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4270 } else {
4271 tcg_gen_muls2_i32(t0, t1, t0, t1);
4272 }
4273 if (add) {
4274 t2 = load_reg(s, a->ra);
4275 t3 = load_reg(s, a->rd);
4276 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
4277 }
4278 if (a->s) {
4279 gen_logicq_cc(t0, t1);
4280 }
4281 store_reg(s, a->ra, t0);
4282 store_reg(s, a->rd, t1);
4283 return true;
4284 }
4285
4286 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
4287 {
4288 return op_mlal(s, a, true, false);
4289 }
4290
4291 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
4292 {
4293 return op_mlal(s, a, false, false);
4294 }
4295
4296 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
4297 {
4298 return op_mlal(s, a, true, true);
4299 }
4300
4301 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
4302 {
4303 return op_mlal(s, a, false, true);
4304 }
4305
4306 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
4307 {
4308 TCGv_i32 t0, t1, t2, zero;
4309
4310 if (s->thumb
4311 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4312 : !ENABLE_ARCH_6) {
4313 return false;
4314 }
4315
4316 t0 = load_reg(s, a->rm);
4317 t1 = load_reg(s, a->rn);
4318 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4319 zero = tcg_constant_i32(0);
4320 t2 = load_reg(s, a->ra);
4321 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4322 t2 = load_reg(s, a->rd);
4323 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4324 store_reg(s, a->ra, t0);
4325 store_reg(s, a->rd, t1);
4326 return true;
4327 }
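/*
 * UMAAL computes {rd:ra} (high:low) = rn * rm + ra + rd.  The double
 * accumulate cannot overflow 64 bits: (2^32-1)^2 + 2*(2^32-1) == 2^64 - 1,
 * so the two add2 steps above never lose a carry.
 */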
4328
4329 /*
4330 * Saturating addition and subtraction
4331 */
4332
4333 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
4334 {
4335 TCGv_i32 t0, t1;
4336
4337 if (s->thumb
4338 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4339 : !ENABLE_ARCH_5TE) {
4340 return false;
4341 }
4342
4343 t0 = load_reg(s, a->rm);
4344 t1 = load_reg(s, a->rn);
4345 if (doub) {
4346 gen_helper_add_saturate(t1, tcg_env, t1, t1);
4347 }
4348 if (add) {
4349 gen_helper_add_saturate(t0, tcg_env, t0, t1);
4350 } else {
4351 gen_helper_sub_saturate(t0, tcg_env, t0, t1);
4352 }
4353 store_reg(s, a->rd, t0);
4354 return true;
4355 }
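/*
 * For QDADD/QDSUB (doub == true) the Rn operand is first doubled with
 * saturation, and the result is then added to or subtracted from Rm,
 * again with saturation; either step may set the Q flag via the helpers.
 */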
4356
4357 #define DO_QADDSUB(NAME, ADD, DOUB) \
4358 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4359 { \
4360 return op_qaddsub(s, a, ADD, DOUB); \
4361 }
4362
4363 DO_QADDSUB(QADD, true, false)
4364 DO_QADDSUB(QSUB, false, false)
4365 DO_QADDSUB(QDADD, true, true)
4366 DO_QADDSUB(QDSUB, false, true)
4367
4368 #undef DO_QADDSUB
4369
4370 /*
4371 * Halfword multiply and multiply accumulate
4372 */
4373
4374 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
4375 int add_long, bool nt, bool mt)
4376 {
4377 TCGv_i32 t0, t1, tl, th;
4378
4379 if (s->thumb
4380 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4381 : !ENABLE_ARCH_5TE) {
4382 return false;
4383 }
4384
4385 t0 = load_reg(s, a->rn);
4386 t1 = load_reg(s, a->rm);
4387 gen_mulxy(t0, t1, nt, mt);
4388
4389 switch (add_long) {
4390 case 0:
4391 store_reg(s, a->rd, t0);
4392 break;
4393 case 1:
4394 t1 = load_reg(s, a->ra);
4395 gen_helper_add_setq(t0, tcg_env, t0, t1);
4396 store_reg(s, a->rd, t0);
4397 break;
4398 case 2:
4399 tl = load_reg(s, a->ra);
4400 th = load_reg(s, a->rd);
4401 /* Sign-extend the 32-bit product to 64 bits. */
4402 t1 = tcg_temp_new_i32();
4403 tcg_gen_sari_i32(t1, t0, 31);
4404 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
4405 store_reg(s, a->ra, tl);
4406 store_reg(s, a->rd, th);
4407 break;
4408 default:
4409 g_assert_not_reached();
4410 }
4411 return true;
4412 }
4413
4414 #define DO_SMLAX(NAME, add, nt, mt) \
4415 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4416 { \
4417 return op_smlaxxx(s, a, add, nt, mt); \
4418 }
4419
4420 DO_SMLAX(SMULBB, 0, 0, 0)
4421 DO_SMLAX(SMULBT, 0, 0, 1)
4422 DO_SMLAX(SMULTB, 0, 1, 0)
4423 DO_SMLAX(SMULTT, 0, 1, 1)
4424
4425 DO_SMLAX(SMLABB, 1, 0, 0)
4426 DO_SMLAX(SMLABT, 1, 0, 1)
4427 DO_SMLAX(SMLATB, 1, 1, 0)
4428 DO_SMLAX(SMLATT, 1, 1, 1)
4429
4430 DO_SMLAX(SMLALBB, 2, 0, 0)
4431 DO_SMLAX(SMLALBT, 2, 0, 1)
4432 DO_SMLAX(SMLALTB, 2, 1, 0)
4433 DO_SMLAX(SMLALTT, 2, 1, 1)
4434
4435 #undef DO_SMLAX
4436
4437 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
4438 {
4439 TCGv_i32 t0, t1;
4440
4441 if (!ENABLE_ARCH_5TE) {
4442 return false;
4443 }
4444
4445 t0 = load_reg(s, a->rn);
4446 t1 = load_reg(s, a->rm);
4447 /*
4448 * Since the nominal result is product<47:16>, shift the 16-bit
4449 * input up by 16 bits, so that the result is at product<63:32>.
4450 */
4451 if (mt) {
4452 tcg_gen_andi_i32(t1, t1, 0xffff0000);
4453 } else {
4454 tcg_gen_shli_i32(t1, t1, 16);
4455 }
4456 tcg_gen_muls2_i32(t0, t1, t0, t1);
4457 if (add) {
4458 t0 = load_reg(s, a->ra);
4459 gen_helper_add_setq(t1, tcg_env, t1, t0);
4460 }
4461 store_reg(s, a->rd, t1);
4462 return true;
4463 }
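/*
 * SMULW{B,T} / SMLAW{B,T} above form the 48-bit product of a 32-bit and
 * a 16-bit operand and keep bits [47:16].  Pre-shifting the 16-bit
 * operand into the top half lets tcg_gen_muls2_i32 deliver exactly those
 * bits in the high word of the 64-bit product.
 */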
4464
4465 #define DO_SMLAWX(NAME, add, mt) \
4466 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4467 { \
4468 return op_smlawx(s, a, add, mt); \
4469 }
4470
4471 DO_SMLAWX(SMULWB, 0, 0)
4472 DO_SMLAWX(SMULWT, 0, 1)
4473 DO_SMLAWX(SMLAWB, 1, 0)
4474 DO_SMLAWX(SMLAWT, 1, 1)
4475
4476 #undef DO_SMLAWX
4477
4478 /*
4479 * MSR (immediate) and hints
4480 */
4481
4482 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
4483 {
4484 /*
4485 * When running single-threaded TCG code, use the helper to ensure that
4486 * the next round-robin scheduled vCPU gets a crack. When running in
4487 * MTTCG we don't generate jumps to the helper as it won't affect the
4488 * scheduling of other vCPUs.
4489 */
4490 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4491 gen_update_pc(s, curr_insn_len(s));
4492 s->base.is_jmp = DISAS_YIELD;
4493 }
4494 return true;
4495 }
4496
4497 static bool trans_WFE(DisasContext *s, arg_WFE *a)
4498 {
4499 /*
4500 * When running single-threaded TCG code, use the helper to ensure that
4501 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4502 * just skip this instruction. Currently the SEV/SEVL instructions,
4503 * which are *one* of many ways to wake the CPU from WFE, are not
4504 * implemented so we can't sleep like WFI does.
4505 */
4506 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4507 gen_update_pc(s, curr_insn_len(s));
4508 s->base.is_jmp = DISAS_WFE;
4509 }
4510 return true;
4511 }
4512
4513 static bool trans_WFI(DisasContext *s, arg_WFI *a)
4514 {
4515 /* For WFI, halt the vCPU until an IRQ. */
4516 gen_update_pc(s, curr_insn_len(s));
4517 s->base.is_jmp = DISAS_WFI;
4518 return true;
4519 }
4520
4521 static bool trans_ESB(DisasContext *s, arg_ESB *a)
4522 {
4523 /*
4524 * For M-profile, minimal-RAS ESB can be a NOP.
4525 * Without RAS, we must implement this as NOP.
4526 */
4527 if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
4528 /*
4529 * QEMU does not have a source of physical SErrors,
4530 * so we are only concerned with virtual SErrors.
4531 * The pseudocode in the ARM for this case is
4532 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
4533 * AArch32.vESBOperation();
4534 * Most of the condition can be evaluated at translation time.
4535 * Test for EL2 present, and defer test for SEL2 to runtime.
4536 */
4537 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
4538 gen_helper_vesb(tcg_env);
4539 }
4540 }
4541 return true;
4542 }
4543
4544 static bool trans_NOP(DisasContext *s, arg_NOP *a)
4545 {
4546 return true;
4547 }
4548
4549 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
4550 {
4551 uint32_t val = ror32(a->imm, a->rot * 2);
4552 uint32_t mask = msr_mask(s, a->mask, a->r);
4553
4554 if (gen_set_psr_im(s, mask, a->r, val)) {
4555 unallocated_encoding(s);
4556 }
4557 return true;
4558 }
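/*
 * Note on the immediate above: as with A32 data-processing modified
 * immediates, the 8-bit value is rotated right by twice the 4-bit
 * rotate field, hence the "a->rot * 2".
 */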
4559
4560 /*
4561 * Cyclic Redundancy Check
4562 */
4563
4564 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
4565 {
4566 TCGv_i32 t1, t2, t3;
4567
4568 if (!dc_isar_feature(aa32_crc32, s)) {
4569 return false;
4570 }
4571
4572 t1 = load_reg(s, a->rn);
4573 t2 = load_reg(s, a->rm);
4574 switch (sz) {
4575 case MO_8:
4576 gen_uxtb(t2);
4577 break;
4578 case MO_16:
4579 gen_uxth(t2);
4580 break;
4581 case MO_32:
4582 break;
4583 default:
4584 g_assert_not_reached();
4585 }
4586 t3 = tcg_constant_i32(1 << sz);
4587 if (c) {
4588 gen_helper_crc32c(t1, t1, t2, t3);
4589 } else {
4590 gen_helper_crc32(t1, t1, t2, t3);
4591 }
4592 store_reg(s, a->rd, t1);
4593 return true;
4594 }
4595
4596 #define DO_CRC32(NAME, c, sz) \
4597 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4598 { return op_crc32(s, a, c, sz); }
4599
4600 DO_CRC32(CRC32B, false, MO_8)
4601 DO_CRC32(CRC32H, false, MO_16)
4602 DO_CRC32(CRC32W, false, MO_32)
4603 DO_CRC32(CRC32CB, true, MO_8)
4604 DO_CRC32(CRC32CH, true, MO_16)
4605 DO_CRC32(CRC32CW, true, MO_32)
4606
4607 #undef DO_CRC32
4608
4609 /*
4610 * Miscellaneous instructions
4611 */
4612
4613 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
4614 {
4615 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4616 return false;
4617 }
4618 gen_mrs_banked(s, a->r, a->sysm, a->rd);
4619 return true;
4620 }
4621
4622 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
4623 {
4624 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4625 return false;
4626 }
4627 gen_msr_banked(s, a->r, a->sysm, a->rn);
4628 return true;
4629 }
4630
4631 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
4632 {
4633 TCGv_i32 tmp;
4634
4635 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4636 return false;
4637 }
4638 if (a->r) {
4639 if (IS_USER(s)) {
4640 unallocated_encoding(s);
4641 return true;
4642 }
4643 tmp = load_cpu_field(spsr);
4644 } else {
4645 tmp = tcg_temp_new_i32();
4646 gen_helper_cpsr_read(tmp, tcg_env);
4647 }
4648 store_reg(s, a->rd, tmp);
4649 return true;
4650 }
4651
4652 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
4653 {
4654 TCGv_i32 tmp;
4655 uint32_t mask = msr_mask(s, a->mask, a->r);
4656
4657 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4658 return false;
4659 }
4660 tmp = load_reg(s, a->rn);
4661 if (gen_set_psr(s, mask, a->r, tmp)) {
4662 unallocated_encoding(s);
4663 }
4664 return true;
4665 }
4666
4667 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
4668 {
4669 TCGv_i32 tmp;
4670
4671 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4672 return false;
4673 }
4674 tmp = tcg_temp_new_i32();
4675 gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
4676 store_reg(s, a->rd, tmp);
4677 return true;
4678 }
4679
4680 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
4681 {
4682 TCGv_i32 addr, reg;
4683
4684 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4685 return false;
4686 }
4687 addr = tcg_constant_i32((a->mask << 10) | a->sysm);
4688 reg = load_reg(s, a->rn);
4689 gen_helper_v7m_msr(tcg_env, addr, reg);
4690 /* If we wrote to CONTROL, the EL might have changed */
4691 gen_rebuild_hflags(s, true);
4692 gen_lookup_tb(s);
4693 return true;
4694 }
4695
4696 static bool trans_BX(DisasContext *s, arg_BX *a)
4697 {
4698 if (!ENABLE_ARCH_4T) {
4699 return false;
4700 }
4701 gen_bx_excret(s, load_reg(s, a->rm));
4702 return true;
4703 }
4704
4705 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
4706 {
4707 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
4708 return false;
4709 }
4710 /*
4711 * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a
4712 * TBFLAGS bit on a basically-never-happens case, so call a helper
4713 * function to check for the trap and raise the exception if needed
4714 * (passing it the register number for the syndrome value).
4715 * v8A doesn't have this HSTR bit.
4716 */
4717 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4718 arm_dc_feature(s, ARM_FEATURE_EL2) &&
4719 s->current_el < 2 && s->ns) {
4720 gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
4721 }
4722 /* Trivial implementation equivalent to bx. */
4723 gen_bx(s, load_reg(s, a->rm));
4724 return true;
4725 }
4726
4727 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
4728 {
4729 TCGv_i32 tmp;
4730
4731 if (!ENABLE_ARCH_5) {
4732 return false;
4733 }
4734 tmp = load_reg(s, a->rm);
4735 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
4736 gen_bx(s, tmp);
4737 return true;
4738 }
4739
4740 /*
4741 * BXNS/BLXNS: only exist for v8M with the security extensions,
4742 * and always UNDEF if NonSecure. We don't implement these in
4743 * the user-only mode either (in theory you can use them from
4744 * Secure User mode but they are too tied in to system emulation).
4745 */
4746 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
4747 {
4748 if (!s->v8m_secure || IS_USER_ONLY) {
4749 unallocated_encoding(s);
4750 } else {
4751 gen_bxns(s, a->rm);
4752 }
4753 return true;
4754 }
4755
4756 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
4757 {
4758 if (!s->v8m_secure || IS_USER_ONLY) {
4759 unallocated_encoding(s);
4760 } else {
4761 gen_blxns(s, a->rm);
4762 }
4763 return true;
4764 }
4765
4766 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
4767 {
4768 TCGv_i32 tmp;
4769
4770 if (!ENABLE_ARCH_5) {
4771 return false;
4772 }
4773 tmp = load_reg(s, a->rm);
4774 tcg_gen_clzi_i32(tmp, tmp, 32);
4775 store_reg(s, a->rd, tmp);
4776 return true;
4777 }
4778
4779 static bool trans_ERET(DisasContext *s, arg_ERET *a)
4780 {
4781 TCGv_i32 tmp;
4782
4783 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
4784 return false;
4785 }
4786 if (IS_USER(s)) {
4787 unallocated_encoding(s);
4788 return true;
4789 }
4790 if (s->current_el == 2) {
4791 /* ERET from Hyp uses ELR_Hyp, not LR */
4792 tmp = load_cpu_field_low32(elr_el[2]);
4793 } else {
4794 tmp = load_reg(s, 14);
4795 }
4796 gen_exception_return(s, tmp);
4797 return true;
4798 }
4799
4800 static bool trans_HLT(DisasContext *s, arg_HLT *a)
4801 {
4802 gen_hlt(s, a->imm);
4803 return true;
4804 }
4805
4806 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
4807 {
4808 if (!ENABLE_ARCH_5) {
4809 return false;
4810 }
4811 /* BKPT is OK with ECI set and leaves it untouched */
4812 s->eci_handled = true;
4813 if (arm_dc_feature(s, ARM_FEATURE_M) &&
4814 semihosting_enabled(s->current_el == 0) &&
4815 (a->imm == 0xab)) {
4816 gen_exception_internal_insn(s, EXCP_SEMIHOST);
4817 } else {
4818 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
4819 }
4820 return true;
4821 }
4822
4823 static bool trans_HVC(DisasContext *s, arg_HVC *a)
4824 {
4825 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
4826 return false;
4827 }
4828 if (IS_USER(s)) {
4829 unallocated_encoding(s);
4830 } else {
4831 gen_hvc(s, a->imm);
4832 }
4833 return true;
4834 }
4835
4836 static bool trans_SMC(DisasContext *s, arg_SMC *a)
4837 {
4838 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
4839 return false;
4840 }
4841 if (IS_USER(s)) {
4842 unallocated_encoding(s);
4843 } else {
4844 gen_smc(s);
4845 }
4846 return true;
4847 }
4848
4849 static bool trans_SG(DisasContext *s, arg_SG *a)
4850 {
4851 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4852 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4853 return false;
4854 }
4855 /*
4856 * SG (v8M only)
4857 * The bulk of the behaviour for this instruction is implemented
4858 * in v7m_handle_execute_nsc(), which deals with the insn when
4859 * it is executed by a CPU in non-secure state from memory
4860 * which is Secure & NonSecure-Callable.
4861 * Here we only need to handle the remaining cases:
4862 * * in NS memory (including the "security extension not
4863 * implemented" case) : NOP
4864 * * in S memory but CPU already secure (clear IT bits)
4865 * We know that the attribute for the memory this insn is
4866 * in must match the current CPU state, because otherwise
4867 * get_phys_addr_pmsav8 would have generated an exception.
4868 */
4869 if (s->v8m_secure) {
4870 /* Like the IT insn, we don't need to generate any code */
4871 s->condexec_cond = 0;
4872 s->condexec_mask = 0;
4873 }
4874 return true;
4875 }
4876
4877 static bool trans_TT(DisasContext *s, arg_TT *a)
4878 {
4879 TCGv_i32 addr, tmp;
4880
4881 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4882 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4883 return false;
4884 }
4885 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
4886 /* We UNDEF for these UNPREDICTABLE cases */
4887 unallocated_encoding(s);
4888 return true;
4889 }
4890 if (a->A && !s->v8m_secure) {
4891 /* This case is UNDEFINED. */
4892 unallocated_encoding(s);
4893 return true;
4894 }
4895
4896 addr = load_reg(s, a->rn);
4897 tmp = tcg_temp_new_i32();
4898 gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
4899 store_reg(s, a->rd, tmp);
4900 return true;
4901 }
4902
4903 /*
4904 * Load/store register index
4905 */
4906
4907 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
4908 {
4909 ISSInfo ret;
4910
4911 /* ISS not valid if writeback */
4912 if (p && !w) {
4913 ret = rd;
4914 if (curr_insn_len(s) == 2) {
4915 ret |= ISSIs16Bit;
4916 }
4917 } else {
4918 ret = ISSInvalid;
4919 }
4920 return ret;
4921 }
4922
4923 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
4924 {
4925 TCGv_i32 addr = load_reg(s, a->rn);
4926
4927 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
4928 gen_helper_v8m_stackcheck(tcg_env, addr);
4929 }
4930
4931 if (a->p) {
4932 TCGv_i32 ofs = load_reg(s, a->rm);
4933 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4934 if (a->u) {
4935 tcg_gen_add_i32(addr, addr, ofs);
4936 } else {
4937 tcg_gen_sub_i32(addr, addr, ofs);
4938 }
4939 }
4940 return addr;
4941 }
4942
4943 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
4944 TCGv_i32 addr, int address_offset)
4945 {
4946 if (!a->p) {
4947 TCGv_i32 ofs = load_reg(s, a->rm);
4948 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4949 if (a->u) {
4950 tcg_gen_add_i32(addr, addr, ofs);
4951 } else {
4952 tcg_gen_sub_i32(addr, addr, ofs);
4953 }
4954 } else if (!a->w) {
4955 return;
4956 }
4957 tcg_gen_addi_i32(addr, addr, address_offset);
4958 store_reg(s, a->rn, addr);
4959 }
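/*
 * op_addr_rr_post above handles both writeback forms: post-indexed
 * (!a->p) applies the register offset after the access, while
 * pre-indexed with writeback (a->w) stores the already-offset address.
 * The address_offset argument lets LDRD/STRD undo the extra +4 that was
 * added to reach the second word.
 */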
4960
4961 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
4962 MemOp mop, int mem_idx)
4963 {
4964 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
4965 TCGv_i32 addr, tmp;
4966
4967 addr = op_addr_rr_pre(s, a);
4968
4969 tmp = tcg_temp_new_i32();
4970 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
4971 disas_set_da_iss(s, mop, issinfo);
4972
4973 /*
4974 * Perform base writeback before the loaded value to
4975 * ensure correct behavior with overlapping index registers.
4976 */
4977 op_addr_rr_post(s, a, addr, 0);
4978 store_reg_from_load(s, a->rt, tmp);
4979 return true;
4980 }
4981
4982 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
4983 MemOp mop, int mem_idx)
4984 {
4985 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
4986 TCGv_i32 addr, tmp;
4987
4988 /*
4989 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
4990 * is either UNPREDICTABLE or has defined behaviour
4991 */
4992 if (s->thumb && a->rn == 15) {
4993 return false;
4994 }
4995
4996 addr = op_addr_rr_pre(s, a);
4997
4998 tmp = load_reg(s, a->rt);
4999 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5000 disas_set_da_iss(s, mop, issinfo);
5001
5002 op_addr_rr_post(s, a, addr, 0);
5003 return true;
5004 }
5005
5006 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
5007 {
5008 int mem_idx = get_mem_index(s);
5009 TCGv_i32 addr, tmp;
5010
5011 if (!ENABLE_ARCH_5TE) {
5012 return false;
5013 }
5014 if (a->rt & 1) {
5015 unallocated_encoding(s);
5016 return true;
5017 }
5018 addr = op_addr_rr_pre(s, a);
5019
5020 tmp = tcg_temp_new_i32();
5021 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5022 store_reg(s, a->rt, tmp);
5023
5024 tcg_gen_addi_i32(addr, addr, 4);
5025
5026 tmp = tcg_temp_new_i32();
5027 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5028 store_reg(s, a->rt + 1, tmp);
5029
5030 /* LDRD w/ base writeback is undefined if the registers overlap. */
5031 op_addr_rr_post(s, a, addr, -4);
5032 return true;
5033 }
5034
5035 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
5036 {
5037 int mem_idx = get_mem_index(s);
5038 TCGv_i32 addr, tmp;
5039
5040 if (!ENABLE_ARCH_5TE) {
5041 return false;
5042 }
5043 if (a->rt & 1) {
5044 unallocated_encoding(s);
5045 return true;
5046 }
5047 addr = op_addr_rr_pre(s, a);
5048
5049 tmp = load_reg(s, a->rt);
5050 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5051
5052 tcg_gen_addi_i32(addr, addr, 4);
5053
5054 tmp = load_reg(s, a->rt + 1);
5055 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5056
5057 op_addr_rr_post(s, a, addr, -4);
5058 return true;
5059 }
5060
5061 /*
5062 * Load/store immediate index
5063 */
5064
5065 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
5066 {
5067 int ofs = a->imm;
5068
5069 if (!a->u) {
5070 ofs = -ofs;
5071 }
5072
5073 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
5074 /*
5075 * Stackcheck. Here we know 'addr' is the current SP;
5076 * U is set if we're moving SP up, else down. It is
5077 * UNKNOWN whether the limit check triggers when SP starts
5078 * below the limit and ends up above it; we chose to do so.
5079 */
5080 if (!a->u) {
5081 TCGv_i32 newsp = tcg_temp_new_i32();
5082 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
5083 gen_helper_v8m_stackcheck(tcg_env, newsp);
5084 } else {
5085 gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
5086 }
5087 }
5088
5089 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
5090 }
5091
5092 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
5093 TCGv_i32 addr, int address_offset)
5094 {
5095 if (!a->p) {
5096 if (a->u) {
5097 address_offset += a->imm;
5098 } else {
5099 address_offset -= a->imm;
5100 }
5101 } else if (!a->w) {
5102 return;
5103 }
5104 tcg_gen_addi_i32(addr, addr, address_offset);
5105 store_reg(s, a->rn, addr);
5106 }
5107
5108 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
5109 MemOp mop, int mem_idx)
5110 {
5111 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
5112 TCGv_i32 addr, tmp;
5113
5114 addr = op_addr_ri_pre(s, a);
5115
5116 tmp = tcg_temp_new_i32();
5117 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
5118 disas_set_da_iss(s, mop, issinfo);
5119
5120 /*
5121 * Perform base writeback before the loaded value to
5122 * ensure correct behavior with overlapping index registers.
5123 */
5124 op_addr_ri_post(s, a, addr, 0);
5125 store_reg_from_load(s, a->rt, tmp);
5126 return true;
5127 }
5128
5129 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
5130 MemOp mop, int mem_idx)
5131 {
5132 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
5133 TCGv_i32 addr, tmp;
5134
5135 /*
5136 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
5137 * is either UNPREDICTABLE or has defined behaviour
5138 */
5139 if (s->thumb && a->rn == 15) {
5140 return false;
5141 }
5142
5143 addr = op_addr_ri_pre(s, a);
5144
5145 tmp = load_reg(s, a->rt);
5146 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5147 disas_set_da_iss(s, mop, issinfo);
5148
5149 op_addr_ri_post(s, a, addr, 0);
5150 return true;
5151 }
5152
5153 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5154 {
5155 int mem_idx = get_mem_index(s);
5156 TCGv_i32 addr, tmp;
5157
5158 addr = op_addr_ri_pre(s, a);
5159
5160 tmp = tcg_temp_new_i32();
5161 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5162 store_reg(s, a->rt, tmp);
5163
5164 tcg_gen_addi_i32(addr, addr, 4);
5165
5166 tmp = tcg_temp_new_i32();
5167 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5168 store_reg(s, rt2, tmp);
5169
5170 /* LDRD w/ base writeback is undefined if the registers overlap. */
5171 op_addr_ri_post(s, a, addr, -4);
5172 return true;
5173 }
5174
5175 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5176 {
5177 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5178 return false;
5179 }
5180 return op_ldrd_ri(s, a, a->rt + 1);
5181 }
5182
5183 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5184 {
5185 arg_ldst_ri b = {
5186 .u = a->u, .w = a->w, .p = a->p,
5187 .rn = a->rn, .rt = a->rt, .imm = a->imm
5188 };
5189 return op_ldrd_ri(s, &b, a->rt2);
5190 }
5191
5192 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5193 {
5194 int mem_idx = get_mem_index(s);
5195 TCGv_i32 addr, tmp;
5196
5197 addr = op_addr_ri_pre(s, a);
5198
5199 tmp = load_reg(s, a->rt);
5200 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5201
5202 tcg_gen_addi_i32(addr, addr, 4);
5203
5204 tmp = load_reg(s, rt2);
5205 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
5206
5207 op_addr_ri_post(s, a, addr, -4);
5208 return true;
5209 }
5210
5211 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5212 {
5213 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5214 return false;
5215 }
5216 return op_strd_ri(s, a, a->rt + 1);
5217 }
5218
5219 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5220 {
5221 arg_ldst_ri b = {
5222 .u = a->u, .w = a->w, .p = a->p,
5223 .rn = a->rn, .rt = a->rt, .imm = a->imm
5224 };
5225 return op_strd_ri(s, &b, a->rt2);
5226 }
5227
5228 #define DO_LDST(NAME, WHICH, MEMOP) \
5229 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
5230 { \
5231 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
5232 } \
5233 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
5234 { \
5235 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
5236 } \
5237 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
5238 { \
5239 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
5240 } \
5241 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
5242 { \
5243 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
5244 }
5245
5246 DO_LDST(LDR, load, MO_UL)
5247 DO_LDST(LDRB, load, MO_UB)
5248 DO_LDST(LDRH, load, MO_UW)
5249 DO_LDST(LDRSB, load, MO_SB)
5250 DO_LDST(LDRSH, load, MO_SW)
5251
5252 DO_LDST(STR, store, MO_UL)
5253 DO_LDST(STRB, store, MO_UB)
5254 DO_LDST(STRH, store, MO_UW)
5255
5256 #undef DO_LDST
5257
5258 /*
5259 * Synchronization primitives
5260 */
5261
5262 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
5263 {
5264 TCGv_i32 addr, tmp;
5265 TCGv taddr;
5266
5267 opc |= s->be_data;
5268 addr = load_reg(s, a->rn);
5269 taddr = gen_aa32_addr(s, addr, opc);
5270
5271 tmp = load_reg(s, a->rt2);
5272 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
5273
5274 store_reg(s, a->rt, tmp);
5275 return true;
5276 }
5277
5278 static bool trans_SWP(DisasContext *s, arg_SWP *a)
5279 {
5280 return op_swp(s, a, MO_UL | MO_ALIGN);
5281 }
5282
5283 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
5284 {
5285 return op_swp(s, a, MO_UB);
5286 }
5287
5288 /*
5289 * Load/Store Exclusive and Load-Acquire/Store-Release
5290 */
5291
5292 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
5293 {
5294 TCGv_i32 addr;
5295 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5296 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5297
5298 /* We UNDEF for these UNPREDICTABLE cases. */
5299 if (a->rd == 15 || a->rn == 15 || a->rt == 15
5300 || a->rd == a->rn || a->rd == a->rt
5301 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
5302 || (mop == MO_64
5303 && (a->rt2 == 15
5304 || a->rd == a->rt2
5305 || (!v8a && s->thumb && a->rt2 == 13)))) {
5306 unallocated_encoding(s);
5307 return true;
5308 }
5309
5310 if (rel) {
5311 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5312 }
5313
5314 addr = tcg_temp_new_i32();
5315 load_reg_var(s, addr, a->rn);
5316 tcg_gen_addi_i32(addr, addr, a->imm);
5317
5318 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
5319 return true;
5320 }
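/*
 * Note for op_strex above: the A32 doubleword encodings carry only Rt
 * and require it to be even, so trans_STREXD_a32/trans_STLEXD_a32 below
 * synthesize rt2 = rt + 1 before calling in with MO_64, while the T32
 * forms supply Rt2 from the decode.  rel == true adds the release
 * barrier needed by the STLEX* variants.
 */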
5321
5322 static bool trans_STREX(DisasContext *s, arg_STREX *a)
5323 {
5324 if (!ENABLE_ARCH_6) {
5325 return false;
5326 }
5327 return op_strex(s, a, MO_32, false);
5328 }
5329
5330 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
5331 {
5332 if (!ENABLE_ARCH_6K) {
5333 return false;
5334 }
5335 /* We UNDEF for these UNPREDICTABLE cases. */
5336 if (a->rt & 1) {
5337 unallocated_encoding(s);
5338 return true;
5339 }
5340 a->rt2 = a->rt + 1;
5341 return op_strex(s, a, MO_64, false);
5342 }
5343
5344 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
5345 {
5346 return op_strex(s, a, MO_64, false);
5347 }
5348
5349 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
5350 {
5351 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5352 return false;
5353 }
5354 return op_strex(s, a, MO_8, false);
5355 }
5356
5357 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
5358 {
5359 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5360 return false;
5361 }
5362 return op_strex(s, a, MO_16, false);
5363 }
5364
5365 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
5366 {
5367 if (!ENABLE_ARCH_8) {
5368 return false;
5369 }
5370 return op_strex(s, a, MO_32, true);
5371 }
5372
5373 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
5374 {
5375 if (!ENABLE_ARCH_8) {
5376 return false;
5377 }
5378 /* We UNDEF for these UNPREDICTABLE cases. */
5379 if (a->rt & 1) {
5380 unallocated_encoding(s);
5381 return true;
5382 }
5383 a->rt2 = a->rt + 1;
5384 return op_strex(s, a, MO_64, true);
5385 }
5386
5387 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
5388 {
5389 if (!ENABLE_ARCH_8) {
5390 return false;
5391 }
5392 return op_strex(s, a, MO_64, true);
5393 }
5394
5395 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
5396 {
5397 if (!ENABLE_ARCH_8) {
5398 return false;
5399 }
5400 return op_strex(s, a, MO_8, true);
5401 }
5402
5403 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
5404 {
5405 if (!ENABLE_ARCH_8) {
5406 return false;
5407 }
5408 return op_strex(s, a, MO_16, true);
5409 }
5410
5411 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
5412 {
5413 TCGv_i32 addr, tmp;
5414
5415 if (!ENABLE_ARCH_8) {
5416 return false;
5417 }
5418 /* We UNDEF for these UNPREDICTABLE cases. */
5419 if (a->rn == 15 || a->rt == 15) {
5420 unallocated_encoding(s);
5421 return true;
5422 }
5423
5424 addr = load_reg(s, a->rn);
5425 tmp = load_reg(s, a->rt);
5426 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5427 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5428 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
5429
5430 return true;
5431 }
5432
5433 static bool trans_STL(DisasContext *s, arg_STL *a)
5434 {
5435 return op_stl(s, a, MO_UL);
5436 }
5437
5438 static bool trans_STLB(DisasContext *s, arg_STL *a)
5439 {
5440 return op_stl(s, a, MO_UB);
5441 }
5442
5443 static bool trans_STLH(DisasContext *s, arg_STL *a)
5444 {
5445 return op_stl(s, a, MO_UW);
5446 }
5447
5448 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
5449 {
5450 TCGv_i32 addr;
5451 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5452 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5453
5454 /* We UNDEF for these UNPREDICTABLE cases. */
5455 if (a->rn == 15 || a->rt == 15
5456 || (!v8a && s->thumb && a->rt == 13)
5457 || (mop == MO_64
5458 && (a->rt2 == 15 || a->rt == a->rt2
5459 || (!v8a && s->thumb && a->rt2 == 13)))) {
5460 unallocated_encoding(s);
5461 return true;
5462 }
5463
5464 addr = tcg_temp_new_i32();
5465 load_reg_var(s, addr, a->rn);
5466 tcg_gen_addi_i32(addr, addr, a->imm);
5467
5468 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
5469
5470 if (acq) {
5471 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5472 }
5473 return true;
5474 }
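/*
 * op_ldrex above mirrors op_strex: acq == true adds the acquire barrier
 * after the exclusive load for the LDAEX* variants, so the load cannot
 * be reordered after later memory accesses.
 */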
5475
5476 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
5477 {
5478 if (!ENABLE_ARCH_6) {
5479 return false;
5480 }
5481 return op_ldrex(s, a, MO_32, false);
5482 }
5483
5484 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
5485 {
5486 if (!ENABLE_ARCH_6K) {
5487 return false;
5488 }
5489 /* We UNDEF for these UNPREDICTABLE cases. */
5490 if (a->rt & 1) {
5491 unallocated_encoding(s);
5492 return true;
5493 }
5494 a->rt2 = a->rt + 1;
5495 return op_ldrex(s, a, MO_64, false);
5496 }
5497
5498 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
5499 {
5500 return op_ldrex(s, a, MO_64, false);
5501 }
5502
5503 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
5504 {
5505 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5506 return false;
5507 }
5508 return op_ldrex(s, a, MO_8, false);
5509 }
5510
5511 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
5512 {
5513 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5514 return false;
5515 }
5516 return op_ldrex(s, a, MO_16, false);
5517 }
5518
5519 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
5520 {
5521 if (!ENABLE_ARCH_8) {
5522 return false;
5523 }
5524 return op_ldrex(s, a, MO_32, true);
5525 }
5526
5527 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
5528 {
5529 if (!ENABLE_ARCH_8) {
5530 return false;
5531 }
5532 /* We UNDEF for these UNPREDICTABLE cases. */
5533 if (a->rt & 1) {
5534 unallocated_encoding(s);
5535 return true;
5536 }
5537 a->rt2 = a->rt + 1;
5538 return op_ldrex(s, a, MO_64, true);
5539 }
5540
5541 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
5542 {
5543 if (!ENABLE_ARCH_8) {
5544 return false;
5545 }
5546 return op_ldrex(s, a, MO_64, true);
5547 }
5548
5549 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
5550 {
5551 if (!ENABLE_ARCH_8) {
5552 return false;
5553 }
5554 return op_ldrex(s, a, MO_8, true);
5555 }
5556
5557 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
5558 {
5559 if (!ENABLE_ARCH_8) {
5560 return false;
5561 }
5562 return op_ldrex(s, a, MO_16, true);
5563 }
5564
5565 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
5566 {
5567 TCGv_i32 addr, tmp;
5568
5569 if (!ENABLE_ARCH_8) {
5570 return false;
5571 }
5572 /* We UNDEF for these UNPREDICTABLE cases. */
5573 if (a->rn == 15 || a->rt == 15) {
5574 unallocated_encoding(s);
5575 return true;
5576 }
5577
5578 addr = load_reg(s, a->rn);
5579 tmp = tcg_temp_new_i32();
5580 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5581 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
5582
5583 store_reg(s, a->rt, tmp);
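    /*
     * The barrier after the load orders it before all later loads and
     * stores, giving LDA/LDAB/LDAH their load-acquire semantics.
     */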
5584 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5585 return true;
5586 }
5587
5588 static bool trans_LDA(DisasContext *s, arg_LDA *a)
5589 {
5590 return op_lda(s, a, MO_UL);
5591 }
5592
5593 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
5594 {
5595 return op_lda(s, a, MO_UB);
5596 }
5597
5598 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
5599 {
5600 return op_lda(s, a, MO_UW);
5601 }
5602
5603 /*
5604 * Media instructions
5605 */
5606
5607 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
5608 {
5609 TCGv_i32 t1, t2;
5610
5611 if (!ENABLE_ARCH_6) {
5612 return false;
5613 }
5614
5615 t1 = load_reg(s, a->rn);
5616 t2 = load_reg(s, a->rm);
5617 gen_helper_usad8(t1, t1, t2);
5618 if (a->ra != 15) {
5619 t2 = load_reg(s, a->ra);
5620 tcg_gen_add_i32(t1, t1, t2);
5621 }
5622 store_reg(s, a->rd, t1);
5623 return true;
5624 }
5625
5626 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
5627 {
5628 TCGv_i32 tmp;
5629 int width = a->widthm1 + 1;
5630 int shift = a->lsb;
5631
5632 if (!ENABLE_ARCH_6T2) {
5633 return false;
5634 }
5635 if (shift + width > 32) {
5636 /* UNPREDICTABLE; we choose to UNDEF */
5637 unallocated_encoding(s);
5638 return true;
5639 }
5640
5641 tmp = load_reg(s, a->rn);
5642 if (u) {
5643 tcg_gen_extract_i32(tmp, tmp, shift, width);
5644 } else {
5645 tcg_gen_sextract_i32(tmp, tmp, shift, width);
5646 }
5647 store_reg(s, a->rd, tmp);
5648 return true;
5649 }
5650
5651 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
5652 {
5653 return op_bfx(s, a, false);
5654 }
5655
5656 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
5657 {
5658 return op_bfx(s, a, true);
5659 }
5660
5661 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
5662 {
5663 int msb = a->msb, lsb = a->lsb;
5664 TCGv_i32 t_in, t_rd;
5665 int width;
5666
5667 if (!ENABLE_ARCH_6T2) {
5668 return false;
5669 }
5670 if (msb < lsb) {
5671 /* UNPREDICTABLE; we choose to UNDEF */
5672 unallocated_encoding(s);
5673 return true;
5674 }
5675
5676 width = msb + 1 - lsb;
5677 if (a->rn == 15) {
5678 /* BFC */
5679 t_in = tcg_constant_i32(0);
5680 } else {
5681 /* BFI */
5682 t_in = load_reg(s, a->rn);
5683 }
5684 t_rd = load_reg(s, a->rd);
5685 tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
5686 store_reg(s, a->rd, t_rd);
5687 return true;
5688 }
5689
5690 static bool trans_UDF(DisasContext *s, arg_UDF *a)
5691 {
5692 unallocated_encoding(s);
5693 return true;
5694 }
5695
5696 /*
5697 * Parallel addition and subtraction
5698 */
5699
5700 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
5701 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
5702 {
5703 TCGv_i32 t0, t1;
5704
5705 if (s->thumb
5706 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5707 : !ENABLE_ARCH_6) {
5708 return false;
5709 }
5710
5711 t0 = load_reg(s, a->rn);
5712 t1 = load_reg(s, a->rm);
5713
5714 gen(t0, t0, t1);
5715
5716 store_reg(s, a->rd, t0);
5717 return true;
5718 }
5719
5720 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
5721 void (*gen)(TCGv_i32, TCGv_i32,
5722 TCGv_i32, TCGv_ptr))
5723 {
5724 TCGv_i32 t0, t1;
5725 TCGv_ptr ge;
5726
5727 if (s->thumb
5728 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5729 : !ENABLE_ARCH_6) {
5730 return false;
5731 }
5732
5733 t0 = load_reg(s, a->rn);
5734 t1 = load_reg(s, a->rm);
5735
5736 ge = tcg_temp_new_ptr();
5737 tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
5738 gen(t0, t0, t1, ge);
5739
5740 store_reg(s, a->rd, t0);
5741 return true;
5742 }
5743
5744 #define DO_PAR_ADDSUB(NAME, helper) \
5745 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5746 { \
5747 return op_par_addsub(s, a, helper); \
5748 }
5749
5750 #define DO_PAR_ADDSUB_GE(NAME, helper) \
5751 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5752 { \
5753 return op_par_addsub_ge(s, a, helper); \
5754 }
5755
5756 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
5757 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
5758 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
5759 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
5760 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
5761 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
5762
5763 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
5764 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
5765 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
5766 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
5767 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
5768 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
5769
5770 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
5771 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
5772 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
5773 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
5774 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
5775 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
5776
5777 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
5778 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
5779 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
5780 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
5781 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
5782 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
5783
5784 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
5785 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
5786 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
5787 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
5788 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
5789 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
5790
5791 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
5792 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
5793 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
5794 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
5795 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
5796 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
5797
5798 #undef DO_PAR_ADDSUB
5799 #undef DO_PAR_ADDSUB_GE
5800
5801 /*
5802 * Packing, unpacking, saturation, and reversal
5803 */
5804
5805 static bool trans_PKH(DisasContext *s, arg_PKH *a)
5806 {
5807 TCGv_i32 tn, tm;
5808 int shift = a->imm;
5809
5810 if (s->thumb
5811 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5812 : !ENABLE_ARCH_6) {
5813 return false;
5814 }
5815
5816 tn = load_reg(s, a->rn);
5817 tm = load_reg(s, a->rm);
5818 if (a->tb) {
5819 /* PKHTB */
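        /* A shift field of 0 encodes ASR #32; ASR #31 gives the same result. */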
5820 if (shift == 0) {
5821 shift = 31;
5822 }
5823 tcg_gen_sari_i32(tm, tm, shift);
5824 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
5825 } else {
5826 /* PKHBT */
5827 tcg_gen_shli_i32(tm, tm, shift);
5828 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
5829 }
5830 store_reg(s, a->rd, tn);
5831 return true;
5832 }
5833
5834 static bool op_sat(DisasContext *s, arg_sat *a,
5835 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5836 {
5837 TCGv_i32 tmp;
5838 int shift = a->imm;
5839
5840 if (!ENABLE_ARCH_6) {
5841 return false;
5842 }
5843
5844 tmp = load_reg(s, a->rn);
5845 if (a->sh) {
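        /* For the ASR form, a shift field of 0 encodes ASR #32; ASR #31 gives the same value. */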
5846 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
5847 } else {
5848 tcg_gen_shli_i32(tmp, tmp, shift);
5849 }
5850
5851 gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
5852
5853 store_reg(s, a->rd, tmp);
5854 return true;
5855 }
5856
5857 static bool trans_SSAT(DisasContext *s, arg_sat *a)
5858 {
5859 return op_sat(s, a, gen_helper_ssat);
5860 }
5861
5862 static bool trans_USAT(DisasContext *s, arg_sat *a)
5863 {
5864 return op_sat(s, a, gen_helper_usat);
5865 }
5866
5867 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
5868 {
5869 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5870 return false;
5871 }
5872 return op_sat(s, a, gen_helper_ssat16);
5873 }
5874
5875 static bool trans_USAT16(DisasContext *s, arg_sat *a)
5876 {
5877 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5878 return false;
5879 }
5880 return op_sat(s, a, gen_helper_usat16);
5881 }
5882
5883 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
5884 void (*gen_extract)(TCGv_i32, TCGv_i32),
5885 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
5886 {
5887 TCGv_i32 tmp;
5888
5889 if (!ENABLE_ARCH_6) {
5890 return false;
5891 }
5892
5893 tmp = load_reg(s, a->rm);
5894 /*
5895 * TODO: In many cases we could do a shift instead of a rotate.
5896 * Combined with a simple extend, that becomes an extract.
5897 */
5898 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
5899 gen_extract(tmp, tmp);
5900
5901 if (a->rn != 15) {
5902 TCGv_i32 tmp2 = load_reg(s, a->rn);
5903 gen_add(tmp, tmp, tmp2);
5904 }
5905 store_reg(s, a->rd, tmp);
5906 return true;
5907 }
5908
5909 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
5910 {
5911 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
5912 }
5913
5914 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
5915 {
5916 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
5917 }
5918
5919 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
5920 {
5921 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5922 return false;
5923 }
5924 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
5925 }
5926
5927 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
5928 {
5929 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
5930 }
5931
5932 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
5933 {
5934 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
5935 }
5936
5937 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
5938 {
5939 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5940 return false;
5941 }
5942 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
5943 }
5944
5945 static bool trans_SEL(DisasContext *s, arg_rrr *a)
5946 {
5947 TCGv_i32 t1, t2, t3;
5948
5949 if (s->thumb
5950 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5951 : !ENABLE_ARCH_6) {
5952 return false;
5953 }
5954
5955 t1 = load_reg(s, a->rn);
5956 t2 = load_reg(s, a->rm);
5957 t3 = tcg_temp_new_i32();
5958 tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
5959 gen_helper_sel_flags(t1, t3, t1, t2);
5960 store_reg(s, a->rd, t1);
5961 return true;
5962 }
5963
5964 static bool op_rr(DisasContext *s, arg_rr *a,
5965 void (*gen)(TCGv_i32, TCGv_i32))
5966 {
5967 TCGv_i32 tmp;
5968
5969 tmp = load_reg(s, a->rm);
5970 gen(tmp, tmp);
5971 store_reg(s, a->rd, tmp);
5972 return true;
5973 }
5974
5975 static bool trans_REV(DisasContext *s, arg_rr *a)
5976 {
5977 if (!ENABLE_ARCH_6) {
5978 return false;
5979 }
5980 return op_rr(s, a, tcg_gen_bswap32_i32);
5981 }
5982
5983 static bool trans_REV16(DisasContext *s, arg_rr *a)
5984 {
5985 if (!ENABLE_ARCH_6) {
5986 return false;
5987 }
5988 return op_rr(s, a, gen_rev16);
5989 }
5990
5991 static bool trans_REVSH(DisasContext *s, arg_rr *a)
5992 {
5993 if (!ENABLE_ARCH_6) {
5994 return false;
5995 }
5996 return op_rr(s, a, gen_revsh);
5997 }
5998
5999 static bool trans_RBIT(DisasContext *s, arg_rr *a)
6000 {
6001 if (!ENABLE_ARCH_6T2) {
6002 return false;
6003 }
6004 return op_rr(s, a, gen_helper_rbit);
6005 }
6006
6007 /*
6008 * Signed multiply, signed and unsigned divide
6009 */
6010
6011 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6012 {
6013 TCGv_i32 t1, t2;
6014
6015 if (!ENABLE_ARCH_6) {
6016 return false;
6017 }
6018
6019 t1 = load_reg(s, a->rn);
6020 t2 = load_reg(s, a->rm);
6021 if (m_swap) {
6022 gen_swap_half(t2, t2);
6023 }
6024 gen_smul_dual(t1, t2);
6025
6026 if (sub) {
6027 /*
6028 * This subtraction cannot overflow, so we can do a simple
6029 * 32-bit subtraction and then a possible 32-bit saturating
6030 * addition of Ra.
6031 */
6032 tcg_gen_sub_i32(t1, t1, t2);
6033
6034 if (a->ra != 15) {
6035 t2 = load_reg(s, a->ra);
6036 gen_helper_add_setq(t1, tcg_env, t1, t2);
6037 }
6038 } else if (a->ra == 15) {
6039 /* Single saturation-checking addition */
6040 gen_helper_add_setq(t1, tcg_env, t1, t2);
6041 } else {
6042 /*
6043 * We need to add the products and Ra together and then
6044 * determine whether the final result overflowed. Doing
6045 * this as two separate add-and-check-overflow steps incorrectly
6046 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
6047 * Do all the arithmetic at 64-bits and then check for overflow.
6048 */
6049 TCGv_i64 p64, q64;
6050 TCGv_i32 t3, qf, one;
6051
6052 p64 = tcg_temp_new_i64();
6053 q64 = tcg_temp_new_i64();
6054 tcg_gen_ext_i32_i64(p64, t1);
6055 tcg_gen_ext_i32_i64(q64, t2);
6056 tcg_gen_add_i64(p64, p64, q64);
6057 load_reg_var(s, t2, a->ra);
6058 tcg_gen_ext_i32_i64(q64, t2);
6059 tcg_gen_add_i64(p64, p64, q64);
6060
6061 tcg_gen_extr_i64_i32(t1, t2, p64);
6062 /*
6063 * t1 is the low half of the result which goes into Rd.
6064 * We have overflow and must set Q if the high half (t2)
6065 * is different from the sign-extension of t1.
6066 */
6067 t3 = tcg_temp_new_i32();
6068 tcg_gen_sari_i32(t3, t1, 31);
6069 qf = load_cpu_field(QF);
6070 one = tcg_constant_i32(1);
6071 tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
6072 store_cpu_field(qf, QF);
6073 }
6074 store_reg(s, a->rd, t1);
6075 return true;
6076 }
6077
6078 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
6079 {
6080 return op_smlad(s, a, false, false);
6081 }
6082
6083 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
6084 {
6085 return op_smlad(s, a, true, false);
6086 }
6087
6088 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
6089 {
6090 return op_smlad(s, a, false, true);
6091 }
6092
6093 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
6094 {
6095 return op_smlad(s, a, true, true);
6096 }
6097
6098 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6099 {
6100 TCGv_i32 t1, t2;
6101 TCGv_i64 l1, l2;
6102
6103 if (!ENABLE_ARCH_6) {
6104 return false;
6105 }
6106
6107 t1 = load_reg(s, a->rn);
6108 t2 = load_reg(s, a->rm);
6109 if (m_swap) {
6110 gen_swap_half(t2, t2);
6111 }
6112 gen_smul_dual(t1, t2);
6113
6114 l1 = tcg_temp_new_i64();
6115 l2 = tcg_temp_new_i64();
6116 tcg_gen_ext_i32_i64(l1, t1);
6117 tcg_gen_ext_i32_i64(l2, t2);
6118
6119 if (sub) {
6120 tcg_gen_sub_i64(l1, l1, l2);
6121 } else {
6122 tcg_gen_add_i64(l1, l1, l2);
6123 }
6124
6125 gen_addq(s, l1, a->ra, a->rd);
6126 gen_storeq_reg(s, a->ra, a->rd, l1);
6127 return true;
6128 }
6129
6130 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
6131 {
6132 return op_smlald(s, a, false, false);
6133 }
6134
6135 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
6136 {
6137 return op_smlald(s, a, true, false);
6138 }
6139
6140 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
6141 {
6142 return op_smlald(s, a, false, true);
6143 }
6144
6145 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
6146 {
6147 return op_smlald(s, a, true, true);
6148 }
6149
6150 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
6151 {
6152 TCGv_i32 t1, t2;
6153
6154 if (s->thumb
6155 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
6156 : !ENABLE_ARCH_6) {
6157 return false;
6158 }
6159
6160 t1 = load_reg(s, a->rn);
6161 t2 = load_reg(s, a->rm);
6162 tcg_gen_muls2_i32(t2, t1, t1, t2);
6163
6164 if (a->ra != 15) {
6165 TCGv_i32 t3 = load_reg(s, a->ra);
6166 if (sub) {
6167 /*
6168 * For SMMLS we need a true 64-bit subtract: a non-zero product
6169 * lowpart must produce a borrow into the high word, and the
6170 * result lowpart must be correct for the rounding step below.
6171 */
6172 tcg_gen_sub2_i32(t2, t1, tcg_constant_i32(0), t3, t2, t1);
6173 } else {
6174 tcg_gen_add_i32(t1, t1, t3);
6175 }
6176 }
6177 if (round) {
6178 /*
6179 * Adding 0x80000000 to the 64-bit quantity means that we have
6180 * carry in to the high word when the low word has the msb set.
6181 */
6182 tcg_gen_shri_i32(t2, t2, 31);
6183 tcg_gen_add_i32(t1, t1, t2);
6184 }
6185 store_reg(s, a->rd, t1);
6186 return true;
6187 }
6188
6189 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
6190 {
6191 return op_smmla(s, a, false, false);
6192 }
6193
6194 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
6195 {
6196 return op_smmla(s, a, true, false);
6197 }
6198
6199 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
6200 {
6201 return op_smmla(s, a, false, true);
6202 }
6203
6204 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
6205 {
6206 return op_smmla(s, a, true, true);
6207 }
6208
6209 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
6210 {
6211 TCGv_i32 t1, t2;
6212
6213 if (s->thumb
6214 ? !dc_isar_feature(aa32_thumb_div, s)
6215 : !dc_isar_feature(aa32_arm_div, s)) {
6216 return false;
6217 }
6218
6219 t1 = load_reg(s, a->rn);
6220 t2 = load_reg(s, a->rm);
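    /*
     * The helpers handle division by zero as the architecture requires
     * (result 0, or a trap on M-profile when DIV_0_TRP is set).
     */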
6221 if (u) {
6222 gen_helper_udiv(t1, tcg_env, t1, t2);
6223 } else {
6224 gen_helper_sdiv(t1, tcg_env, t1, t2);
6225 }
6226 store_reg(s, a->rd, t1);
6227 return true;
6228 }
6229
6230 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
6231 {
6232 return op_div(s, a, false);
6233 }
6234
6235 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
6236 {
6237 return op_div(s, a, true);
6238 }
6239
6240 /*
6241 * Block data transfer
6242 */
6243
6244 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
6245 {
6246 TCGv_i32 addr = load_reg(s, a->rn);
6247
6248 if (a->b) {
6249 if (a->i) {
6250 /* pre increment */
6251 tcg_gen_addi_i32(addr, addr, 4);
6252 } else {
6253 /* pre decrement */
6254 tcg_gen_addi_i32(addr, addr, -(n * 4));
6255 }
6256 } else if (!a->i && n != 1) {
6257 /* post decrement */
6258 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6259 }
6260
6261 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6262 /*
6263 * If the writeback is incrementing SP rather than
6264 * decrementing it, and the initial SP is below the
6265 * stack limit but the final written-back SP would
6266 * be above, then we must not perform any memory
6267 * accesses, but it is IMPDEF whether we generate
6268 * an exception. We choose to do so in this case.
6269 * At this point 'addr' is the lowest address, so
6270 * either the original SP (if incrementing) or our
6271 * final SP (if decrementing), so that's what we check.
6272 */
6273 gen_helper_v8m_stackcheck(tcg_env, addr);
6274 }
6275
6276 return addr;
6277 }
6278
6279 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
6280 TCGv_i32 addr, int n)
6281 {
6282 if (a->w) {
6283 /* write back */
6284 if (!a->b) {
6285 if (a->i) {
6286 /* post increment */
6287 tcg_gen_addi_i32(addr, addr, 4);
6288 } else {
6289 /* post decrement */
6290 tcg_gen_addi_i32(addr, addr, -(n * 4));
6291 }
6292 } else if (!a->i && n != 1) {
6293 /* pre decrement */
6294 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6295 }
6296 store_reg(s, a->rn, addr);
6297 }
6298 }
6299
6300 static bool op_stm(DisasContext *s, arg_ldst_block *a)
6301 {
6302 int i, j, n, list, mem_idx;
6303 bool user = a->u;
6304 TCGv_i32 addr, tmp;
6305
6306 if (user) {
6307 /* STM (user) */
6308 if (IS_USER(s)) {
6309 /* Only usable in supervisor mode. */
6310 unallocated_encoding(s);
6311 return true;
6312 }
6313 }
6314
6315 list = a->list;
6316 n = ctpop16(list);
6317 /*
6318 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6319 * to UNDEF. In the T32 STM encoding n == 1 is also UNPREDICTABLE,
6320 * but hardware treats it like the A32 version and implements the
6321 * single-register-store, and some in-the-wild (buggy) software
6322 * assumes that, so we don't UNDEF on that case.
6323 */
6324 if (n < 1 || a->rn == 15) {
6325 unallocated_encoding(s);
6326 return true;
6327 }
6328
6329 s->eci_handled = true;
6330
6331 addr = op_addr_block_pre(s, a, n);
6332 mem_idx = get_mem_index(s);
6333
6334 for (i = j = 0; i < 16; i++) {
6335 if (!(list & (1 << i))) {
6336 continue;
6337 }
6338
6339 if (user && i != 15) {
6340 tmp = tcg_temp_new_i32();
6341 gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
6342 } else {
6343 tmp = load_reg(s, i);
6344 }
6345 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6346
6347 /* No need to add after the last transfer. */
6348 if (++j != n) {
6349 tcg_gen_addi_i32(addr, addr, 4);
6350 }
6351 }
6352
6353 op_addr_block_post(s, a, addr, n);
6354 clear_eci_state(s);
6355 return true;
6356 }
6357
6358 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
6359 {
6360 return op_stm(s, a);
6361 }
6362
6363 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
6364 {
6365 /* Writeback register in register list is UNPREDICTABLE for T32. */
6366 if (a->w && (a->list & (1 << a->rn))) {
6367 unallocated_encoding(s);
6368 return true;
6369 }
6370 return op_stm(s, a);
6371 }
6372
6373 static bool do_ldm(DisasContext *s, arg_ldst_block *a)
6374 {
6375 int i, j, n, list, mem_idx;
6376 bool loaded_base;
6377 bool user = a->u;
6378 bool exc_return = false;
6379 TCGv_i32 addr, tmp, loaded_var;
6380
6381 if (user) {
6382 /* LDM (user), LDM (exception return) */
6383 if (IS_USER(s)) {
6384 /* Only usable in supervisor mode. */
6385 unallocated_encoding(s);
6386 return true;
6387 }
6388 if (extract32(a->list, 15, 1)) {
6389 exc_return = true;
6390 user = false;
6391 } else {
6392 /* LDM (user) does not allow writeback. */
6393 if (a->w) {
6394 unallocated_encoding(s);
6395 return true;
6396 }
6397 }
6398 }
6399
6400 list = a->list;
6401 n = ctpop16(list);
6402 /*
6403 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6404 * to UNDEF. In the T32 LDM encoding n == 1 is also UNPREDICTABLE,
6405 * but hardware treats it like the A32 version and implements the
6406 * single-register-load, and some in-the-wild (buggy) software
6407 * assumes that, so we don't UNDEF on that case.
6408 */
6409 if (n < 1 || a->rn == 15) {
6410 unallocated_encoding(s);
6411 return true;
6412 }
6413
6414 s->eci_handled = true;
6415
6416 addr = op_addr_block_pre(s, a, n);
6417 mem_idx = get_mem_index(s);
6418 loaded_base = false;
6419 loaded_var = NULL;
6420
6421 for (i = j = 0; i < 16; i++) {
6422 if (!(list & (1 << i))) {
6423 continue;
6424 }
6425
6426 tmp = tcg_temp_new_i32();
6427 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6428 if (user) {
6429 gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
6430 } else if (i == a->rn) {
6431 loaded_var = tmp;
6432 loaded_base = true;
6433 } else if (i == 15 && exc_return) {
6434 store_pc_exc_ret(s, tmp);
6435 } else {
6436 store_reg_from_load(s, i, tmp);
6437 }
6438
6439 /* No need to add after the last transfer. */
6440 if (++j != n) {
6441 tcg_gen_addi_i32(addr, addr, 4);
6442 }
6443 }
6444
6445 op_addr_block_post(s, a, addr, n);
6446
6447 if (loaded_base) {
6448 /* Note that we reject base == pc above. */
6449 store_reg(s, a->rn, loaded_var);
6450 }
6451
6452 if (exc_return) {
6453 /* Restore CPSR from SPSR. */
6454 tmp = load_cpu_field(spsr);
6455 translator_io_start(&s->base);
6456 gen_helper_cpsr_write_eret(tcg_env, tmp);
6457 /* Must exit loop to check un-masked IRQs */
6458 s->base.is_jmp = DISAS_EXIT;
6459 }
6460 clear_eci_state(s);
6461 return true;
6462 }
6463
6464 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
6465 {
6466 /*
6467 * Writeback register in register list is UNPREDICTABLE
6468 * for ArchVersion() >= 7. Prior to v7, A32 would write
6469 * an UNKNOWN value to the base register.
6470 */
6471 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
6472 unallocated_encoding(s);
6473 return true;
6474 }
6475 return do_ldm(s, a);
6476 }
6477
6478 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
6479 {
6480 /* Writeback register in register list is UNPREDICTABLE for T32. */
6481 if (a->w && (a->list & (1 << a->rn))) {
6482 unallocated_encoding(s);
6483 return true;
6484 }
6485 return do_ldm(s, a);
6486 }
6487
6488 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
6489 {
6490 /* Writeback is conditional on the base register not being loaded. */
6491 a->w = !(a->list & (1 << a->rn));
6492 return do_ldm(s, a);
6493 }
6494
6495 static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
6496 {
6497 int i;
6498 TCGv_i32 zero;
6499
6500 if (!dc_isar_feature(aa32_m_sec_state, s)) {
6501 return false;
6502 }
6503
6504 if (extract32(a->list, 13, 1)) {
6505 return false;
6506 }
6507
6508 if (!a->list) {
6509 /* UNPREDICTABLE; we choose to UNDEF */
6510 return false;
6511 }
6512
6513 s->eci_handled = true;
6514
6515 zero = tcg_constant_i32(0);
6516 for (i = 0; i < 15; i++) {
6517 if (extract32(a->list, i, 1)) {
6518 /* Clear R[i] */
6519 tcg_gen_mov_i32(cpu_R[i], zero);
6520 }
6521 }
6522 if (extract32(a->list, 15, 1)) {
6523 /*
6524 * Clear APSR (by calling the MSR helper with the same argument
6525 * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
6526 */
6527 gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
6528 }
6529 clear_eci_state(s);
6530 return true;
6531 }
6532
6533 /*
6534 * Branch, branch with link
6535 */
6536
6537 static bool trans_B(DisasContext *s, arg_i *a)
6538 {
6539 gen_jmp(s, jmp_diff(s, a->imm));
6540 return true;
6541 }
6542
6543 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
6544 {
6545 /* This has cond from encoding, required to be outside IT block. */
6546 if (a->cond >= 0xe) {
6547 return false;
6548 }
6549 if (s->condexec_mask) {
6550 unallocated_encoding(s);
6551 return true;
6552 }
6553 arm_skip_unless(s, a->cond);
6554 gen_jmp(s, jmp_diff(s, a->imm));
6555 return true;
6556 }
6557
6558 static bool trans_BL(DisasContext *s, arg_i *a)
6559 {
6560 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6561 gen_jmp(s, jmp_diff(s, a->imm));
6562 return true;
6563 }
6564
6565 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
6566 {
6567 /*
6568 * BLX <imm> would be useless on M-profile; the encoding space
6569 * is used for other insns from v8.1M onward, and UNDEFs before that.
6570 */
6571 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6572 return false;
6573 }
6574
6575 /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
6576 if (s->thumb && (a->imm & 2)) {
6577 return false;
6578 }
6579 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6580 store_cpu_field_constant(!s->thumb, thumb);
6581 /* This jump is computed from an aligned PC: subtract off the low bits. */
6582 gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
6583 return true;
6584 }
6585
6586 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
6587 {
6588 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6589 gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
6590 return true;
6591 }
6592
6593 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
6594 {
6595 TCGv_i32 tmp = tcg_temp_new_i32();
6596
6597 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6598 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
6599 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6600 gen_bx(s, tmp);
6601 return true;
6602 }
6603
6604 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
6605 {
6606 TCGv_i32 tmp;
6607
6608 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6609 if (!ENABLE_ARCH_5) {
6610 return false;
6611 }
6612 tmp = tcg_temp_new_i32();
6613 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
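    /* BLX changes to ARM state, so force the branch target to be word-aligned. */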
6614 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6615 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6616 gen_bx(s, tmp);
6617 return true;
6618 }
6619
6620 static bool trans_BF(DisasContext *s, arg_BF *a)
6621 {
6622 /*
6623 * M-profile branch future insns. The architecture permits an
6624 * implementation to implement these as NOPs (equivalent to
6625 * discarding the LO_BRANCH_INFO cache immediately), and we
6626 * take that IMPDEF option because for QEMU a "real" implementation
6627 * would be complicated and wouldn't execute any faster.
6628 */
6629 if (!dc_isar_feature(aa32_lob, s)) {
6630 return false;
6631 }
6632 if (a->boff == 0) {
6633 /* SEE "Related encodings" (loop insns) */
6634 return false;
6635 }
6636 /* Handle as NOP */
6637 return true;
6638 }
6639
6640 static bool trans_DLS(DisasContext *s, arg_DLS *a)
6641 {
6642 /* M-profile low-overhead loop start */
6643 TCGv_i32 tmp;
6644
6645 if (!dc_isar_feature(aa32_lob, s)) {
6646 return false;
6647 }
6648 if (a->rn == 13 || a->rn == 15) {
6649 /*
6650 * For DLSTP rn == 15 is a related encoding (LCTP); the
6651 * other cases caught by this condition are all
6652 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6653 */
6654 return false;
6655 }
6656
6657 if (a->size != 4) {
6658 /* DLSTP */
6659 if (!dc_isar_feature(aa32_mve, s)) {
6660 return false;
6661 }
6662 if (!vfp_access_check(s)) {
6663 return true;
6664 }
6665 }
6666
6667 /* Not a while loop: set LR to the count, and set LTPSIZE for DLSTP */
6668 tmp = load_reg(s, a->rn);
6669 store_reg(s, 14, tmp);
6670 if (a->size != 4) {
6671 /* DLSTP: set FPSCR.LTPSIZE */
6672 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6673 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6674 }
6675 return true;
6676 }
6677
6678 static bool trans_WLS(DisasContext *s, arg_WLS *a)
6679 {
6680 /* M-profile low-overhead while-loop start */
6681 TCGv_i32 tmp;
6682 DisasLabel nextlabel;
6683
6684 if (!dc_isar_feature(aa32_lob, s)) {
6685 return false;
6686 }
6687 if (a->rn == 13 || a->rn == 15) {
6688 /*
6689 * For WLSTP rn == 15 is a related encoding (LE); the
6690 * other cases caught by this condition are all
6691 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6692 */
6693 return false;
6694 }
6695 if (s->condexec_mask) {
6696 /*
6697 * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
6698 * we choose to UNDEF, because otherwise our use of
6699 * gen_goto_tb(1) would clash with the use of TB exit 1
6700 * in the dc->condjmp condition-failed codepath in
6701 * arm_tr_tb_stop() and we'd get an assertion.
6702 */
6703 return false;
6704 }
6705 if (a->size != 4) {
6706 /* WLSTP */
6707 if (!dc_isar_feature(aa32_mve, s)) {
6708 return false;
6709 }
6710 /*
6711 * We need to check that the FPU is enabled here, but mustn't
6712 * call vfp_access_check() to do that because we don't want to
6713 * do the lazy state preservation in the "loop count is zero" case.
6714 * Do the check-and-raise-exception by hand.
6715 */
6716 if (s->fp_excp_el) {
6717 gen_exception_insn_el(s, 0, EXCP_NOCP,
6718 syn_uncategorized(), s->fp_excp_el);
6719 return true;
6720 }
6721 }
6722
6723 nextlabel = gen_disas_label(s);
6724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
6725 tmp = load_reg(s, a->rn);
6726 store_reg(s, 14, tmp);
6727 if (a->size != 4) {
6728 /*
6729 * WLSTP: set FPSCR.LTPSIZE. This requires that we do the
6730 * lazy state preservation, new FP context creation, etc,
6731 * that vfp_access_check() does. We know that the actual
6732 * access check will succeed (ie it won't generate code that
6733 * throws an exception) because we did that check by hand earlier.
6734 */
6735 bool ok = vfp_access_check(s);
6736 assert(ok);
6737 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6738 /*
6739 * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
6740 * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
6741 */
6742 }
6743 gen_jmp_tb(s, curr_insn_len(s), 1);
6744
6745 set_disas_label(s, nextlabel);
6746 gen_jmp(s, jmp_diff(s, a->imm));
6747 return true;
6748 }
6749
6750 static bool trans_LE(DisasContext *s, arg_LE *a)
6751 {
6752 /*
6753 * M-profile low-overhead loop end. The architecture permits an
6754 * implementation to discard the LO_BRANCH_INFO cache at any time,
6755 * and we take the IMPDEF option to never set it in the first place
6756 * (equivalent to always discarding it immediately), because for QEMU
6757 * a "real" implementation would be complicated and wouldn't execute
6758 * any faster.
6759 */
6760 TCGv_i32 tmp;
6761 DisasLabel loopend;
6762 bool fpu_active;
6763
6764 if (!dc_isar_feature(aa32_lob, s)) {
6765 return false;
6766 }
6767 if (a->f && a->tp) {
6768 return false;
6769 }
6770 if (s->condexec_mask) {
6771 /*
6772 * LE in an IT block is CONSTRAINED UNPREDICTABLE;
6773 * we choose to UNDEF, because otherwise our use of
6774 * gen_goto_tb(1) would clash with the use of TB exit 1
6775 * in the dc->condjmp condition-failed codepath in
6776 * arm_tr_tb_stop() and we'd get an assertion.
6777 */
6778 return false;
6779 }
6780 if (a->tp) {
6781 /* LETP */
6782 if (!dc_isar_feature(aa32_mve, s)) {
6783 return false;
6784 }
6785 if (!vfp_access_check(s)) {
6786 s->eci_handled = true;
6787 return true;
6788 }
6789 }
6790
6791 /* LE/LETP is OK with ECI set and leaves it untouched */
6792 s->eci_handled = true;
6793
6794 /*
6795 * With MVE, LTPSIZE might not be 4, and we must emit an INVSTATE
6796 * UsageFault exception for the LE insn in that case. Note that we
6797 * are not directly checking FPSCR.LTPSIZE but instead check the
6798 * pseudocode LTPSIZE() function, which returns 4 if the FPU is
6799 * not currently active (ie ActiveFPState() returns false). We
6800 * can identify not-active purely from our TB state flags, as the
6801 * FPU is active only if:
6802 * the FPU is enabled
6803 * AND lazy state preservation is not active
6804 * AND we do not need a new fp context (this is the ASPEN/FPCA check)
6805 *
6806 * Usually we don't need to care about this distinction between
6807 * LTPSIZE and FPSCR.LTPSIZE, because the code in vfp_access_check()
6808 * will either take an exception or clear the conditions that make
6809 * the FPU not active. But LE is an unusual case of a non-FP insn
6810 * that looks at LTPSIZE.
6811 */
6812 fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed;
6813
6814 if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
6815 /* Need to do a runtime check for LTPSIZE != 4 */
6816 DisasLabel skipexc = gen_disas_label(s);
6817 tmp = load_cpu_field(v7m.ltpsize);
6818 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
6819 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
6820 set_disas_label(s, skipexc);
6821 }
6822
6823 if (a->f) {
6824 /* Loop-forever: just jump back to the loop start */
6825 gen_jmp(s, jmp_diff(s, -a->imm));
6826 return true;
6827 }
6828
6829 /*
6830 * Not loop-forever. If LR <= loop-decrement-value this is the last loop.
6831 * For LE, we know at this point that LTPSIZE must be 4 and the
6832 * loop decrement value is 1. For LETP we need to calculate the decrement
6833 * value from LTPSIZE.
6834 */
6835 loopend = gen_disas_label(s);
6836 if (!a->tp) {
6837 tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
6838 tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
6839 } else {
6840 /*
6841 * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
6842 * so that decr stays live after the brcondi.
6843 */
6844 TCGv_i32 decr = tcg_temp_new_i32();
6845 TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
6846 tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
6847 tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
6848
6849 tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);
6850
6851 tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
6852 }
6853 /* Jump back to the loop start */
6854 gen_jmp(s, jmp_diff(s, -a->imm));
6855
6856 set_disas_label(s, loopend);
6857 if (a->tp) {
6858 /* Exits from tail-pred loops must reset LTPSIZE to 4 */
6859 store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
6860 }
6861 /* End TB, continuing to following insn */
6862 gen_jmp_tb(s, curr_insn_len(s), 1);
6863 return true;
6864 }
6865
6866 static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
6867 {
6868 /*
6869 * M-profile Loop Clear with Tail Predication. Since our implementation
6870 * doesn't cache branch information, all we need to do is reset
6871 * FPSCR.LTPSIZE to 4.
6872 */
6873
6874 if (!dc_isar_feature(aa32_lob, s) ||
6875 !dc_isar_feature(aa32_mve, s)) {
6876 return false;
6877 }
6878
6879 if (!vfp_access_check(s)) {
6880 return true;
6881 }
6882
6883 store_cpu_field_constant(4, v7m.ltpsize);
6884 return true;
6885 }
6886
6887 static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
6888 {
6889 /*
6890 * M-profile Create Vector Tail Predicate. This insn is itself
6891 * predicated and is subject to beatwise execution.
6892 */
6893 TCGv_i32 rn_shifted, masklen;
6894
6895 if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
6896 return false;
6897 }
6898
6899 if (!mve_eci_check(s) || !vfp_access_check(s)) {
6900 return true;
6901 }
6902
6903 /*
6904 * We pre-calculate the mask length here to avoid having
6905 * to have multiple helpers specialized for size.
6906 * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
6907 */
6908 rn_shifted = tcg_temp_new_i32();
6909 masklen = load_reg(s, a->rn);
6910 tcg_gen_shli_i32(rn_shifted, masklen, a->size);
6911 tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
6912 masklen, tcg_constant_i32(1 << (4 - a->size)),
6913 rn_shifted, tcg_constant_i32(16));
6914 gen_helper_mve_vctp(tcg_env, masklen);
6915 /* This insn updates predication bits */
6916 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6917 mve_update_eci(s);
6918 return true;
6919 }
6920
6921 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
6922 {
6923 TCGv_i32 addr, tmp;
6924
6925 tmp = load_reg(s, a->rm);
6926 if (half) {
6927 tcg_gen_add_i32(tmp, tmp, tmp);
6928 }
6929 addr = load_reg(s, a->rn);
6930 tcg_gen_add_i32(addr, addr, tmp);
6931
6932 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
6933
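    /* The loaded table entry is a halfword count; double it to get a byte offset. */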
6934 tcg_gen_add_i32(tmp, tmp, tmp);
6935 gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
6936 tcg_gen_add_i32(tmp, tmp, addr);
6937 store_reg(s, 15, tmp);
6938 return true;
6939 }
6940
6941 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
6942 {
6943 return op_tbranch(s, a, false);
6944 }
6945
6946 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
6947 {
6948 return op_tbranch(s, a, true);
6949 }
6950
6951 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
6952 {
6953 TCGv_i32 tmp = load_reg(s, a->rn);
6954
6955 arm_gen_condlabel(s);
6956 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
6957 tmp, 0, s->condlabel.label);
6958 gen_jmp(s, jmp_diff(s, a->imm));
6959 return true;
6960 }
6961
6962 /*
6963 * Supervisor call - both T32 & A32 come here so we need to check
6964 * which mode we are in when checking for semihosting.
6965 */
6966
6967 static bool trans_SVC(DisasContext *s, arg_SVC *a)
6968 {
6969 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
6970
6971 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
6972 semihosting_enabled(s->current_el == 0) &&
6973 (a->imm == semihost_imm)) {
6974 gen_exception_internal_insn(s, EXCP_SEMIHOST);
6975 } else {
6976 if (s->fgt_svc) {
6977 uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb);
6978 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
6979 } else {
6980 gen_update_pc(s, curr_insn_len(s));
6981 s->svc_imm = a->imm;
6982 s->base.is_jmp = DISAS_SWI;
6983 }
6984 }
6985 return true;
6986 }
6987
6988 /*
6989 * Unconditional system instructions
6990 */
6991
6992 static bool trans_RFE(DisasContext *s, arg_RFE *a)
6993 {
6994 static const int8_t pre_offset[4] = {
6995 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
6996 };
6997 static const int8_t post_offset[4] = {
6998 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
6999 };
7000 TCGv_i32 addr, t1, t2;
7001
7002 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7003 return false;
7004 }
7005 if (IS_USER(s)) {
7006 unallocated_encoding(s);
7007 return true;
7008 }
7009
7010 addr = load_reg(s, a->rn);
7011 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
7012
7013 /* Load PC into tmp and CPSR into tmp2. */
7014 t1 = tcg_temp_new_i32();
7015 gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7016 tcg_gen_addi_i32(addr, addr, 4);
7017 t2 = tcg_temp_new_i32();
7018 gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7019
7020 if (a->w) {
7021 /* Base writeback. */
7022 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
7023 store_reg(s, a->rn, addr);
7024 }
7025 gen_rfe(s, t1, t2);
7026 return true;
7027 }
7028
7029 static bool trans_SRS(DisasContext *s, arg_SRS *a)
7030 {
7031 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7032 return false;
7033 }
7034 gen_srs(s, a->mode, a->pu, a->w);
7035 return true;
7036 }
7037
7038 static bool trans_CPS(DisasContext *s, arg_CPS *a)
7039 {
7040 uint32_t mask, val;
7041
7042 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7043 return false;
7044 }
7045 if (IS_USER(s)) {
7046 /* Implemented as NOP in user mode. */
7047 return true;
7048 }
7049 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
7050
7051 mask = val = 0;
7052 if (a->imod & 2) {
7053 if (a->A) {
7054 mask |= CPSR_A;
7055 }
7056 if (a->I) {
7057 mask |= CPSR_I;
7058 }
7059 if (a->F) {
7060 mask |= CPSR_F;
7061 }
7062 if (a->imod & 1) {
7063 val |= mask;
7064 }
7065 }
7066 if (a->M) {
7067 mask |= CPSR_M;
7068 val |= a->mode;
7069 }
7070 if (mask) {
7071 gen_set_psr_im(s, mask, 0, val);
7072 }
7073 return true;
7074 }
7075
7076 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
7077 {
7078 TCGv_i32 tmp, addr;
7079
7080 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
7081 return false;
7082 }
7083 if (IS_USER(s)) {
7084 /* Implemented as NOP in user mode. */
7085 return true;
7086 }
7087
7088 tmp = tcg_constant_i32(a->im);
7089 /* FAULTMASK */
7090 if (a->F) {
7091 addr = tcg_constant_i32(19);
7092 gen_helper_v7m_msr(tcg_env, addr, tmp);
7093 }
7094 /* PRIMASK */
7095 if (a->I) {
7096 addr = tcg_constant_i32(16);
7097 gen_helper_v7m_msr(tcg_env, addr, tmp);
7098 }
7099 gen_rebuild_hflags(s, false);
7100 gen_lookup_tb(s);
7101 return true;
7102 }
7103
7104 /*
7105 * Clear-Exclusive, Barriers
7106 */
7107
7108 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
7109 {
7110 if (s->thumb
7111 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
7112 : !ENABLE_ARCH_6K) {
7113 return false;
7114 }
7115 gen_clrex(s);
7116 return true;
7117 }
7118
7119 static bool trans_DSB(DisasContext *s, arg_DSB *a)
7120 {
7121 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7122 return false;
7123 }
7124 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7125 return true;
7126 }
7127
7128 static bool trans_DMB(DisasContext *s, arg_DMB *a)
7129 {
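    /* QEMU models DMB identically to DSB: both become a full TCG barrier. */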
7130 return trans_DSB(s, NULL);
7131 }
7132
7133 static bool trans_ISB(DisasContext *s, arg_ISB *a)
7134 {
7135 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7136 return false;
7137 }
7138 /*
7139 * We need to break the TB after this insn to execute
7140 * self-modifying code correctly and also to take
7141 * any pending interrupts immediately.
7142 */
7143 s->base.is_jmp = DISAS_TOO_MANY;
7144 return true;
7145 }
7146
7147 static bool trans_SB(DisasContext *s, arg_SB *a)
7148 {
7149 if (!dc_isar_feature(aa32_sb, s)) {
7150 return false;
7151 }
7152 /*
7153 * TODO: There is no speculation barrier opcode
7154 * for TCG; MB and end the TB instead.
7155 */
7156 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7157 s->base.is_jmp = DISAS_TOO_MANY;
7158 return true;
7159 }
7160
7161 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
7162 {
7163 if (!ENABLE_ARCH_6) {
7164 return false;
7165 }
7166 if (a->E != (s->be_data == MO_BE)) {
7167 gen_helper_setend(tcg_env);
7168 s->base.is_jmp = DISAS_UPDATE_EXIT;
7169 }
7170 return true;
7171 }
7172
7173 /*
7174 * Preload instructions
7175 * All are nops, contingent on the appropriate arch level.
7176 */
7177
7178 static bool trans_PLD(DisasContext *s, arg_PLD *a)
7179 {
7180 return ENABLE_ARCH_5TE;
7181 }
7182
7183 static bool trans_PLDW(DisasContext *s, arg_PLDW *a)
7184 {
7185 return arm_dc_feature(s, ARM_FEATURE_V7MP);
7186 }
7187
7188 static bool trans_PLI(DisasContext *s, arg_PLI *a)
7189 {
7190 return ENABLE_ARCH_7;
7191 }
7192
7193 /*
7194 * If-then
7195 */
7196
7197 static bool trans_IT(DisasContext *s, arg_IT *a)
7198 {
7199 int cond_mask = a->cond_mask;
7200
7201 /*
7202 * No actual code generated for this insn, just setup state.
7203 *
7204 * Combinations of firstcond and mask which set up an 0b1111
7205 * condition are UNPREDICTABLE; we take the CONSTRAINED
7206 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
7207 * i.e. both meaning "execute always".
7208 */
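    /*
     * Note that firstcond[0] (bit 4 of cond_mask) ends up in condexec_mask,
     * while condexec_cond keeps only firstcond[3:1], matching the ITSTATE layout.
     */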
7209 s->condexec_cond = (cond_mask >> 4) & 0xe;
7210 s->condexec_mask = cond_mask & 0x1f;
7211 return true;
7212 }
7213
7214 /* v8.1M CSEL/CSINC/CSNEG/CSINV */
7215 static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
7216 {
7217 TCGv_i32 rn, rm;
7218 DisasCompare c;
7219
7220 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
7221 return false;
7222 }
7223
7224 if (a->rm == 13) {
7225 /* SEE "Related encodings" (MVE shifts) */
7226 return false;
7227 }
7228
7229 if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
7230 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
7231 return false;
7232 }
7233
7234 /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
7235 rn = tcg_temp_new_i32();
7236 rm = tcg_temp_new_i32();
7237 if (a->rn == 15) {
7238 tcg_gen_movi_i32(rn, 0);
7239 } else {
7240 load_reg_var(s, rn, a->rn);
7241 }
7242 if (a->rm == 15) {
7243 tcg_gen_movi_i32(rm, 0);
7244 } else {
7245 load_reg_var(s, rm, a->rm);
7246 }
7247
7248 switch (a->op) {
7249 case 0: /* CSEL */
7250 break;
7251 case 1: /* CSINC */
7252 tcg_gen_addi_i32(rm, rm, 1);
7253 break;
7254 case 2: /* CSINV */
7255 tcg_gen_not_i32(rm, rm);
7256 break;
7257 case 3: /* CSNEG */
7258 tcg_gen_neg_i32(rm, rm);
7259 break;
7260 default:
7261 g_assert_not_reached();
7262 }
7263
7264 arm_test_cc(&c, a->fcond);
7265 tcg_gen_movcond_i32(c.cond, rn, c.value, tcg_constant_i32(0), rn, rm);
7266
7267 store_reg(s, a->rd, rn);
7268 return true;
7269 }
7270
7271 /*
7272 * Legacy decoder.
7273 */
7274
7275 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7276 {
7277 unsigned int cond = insn >> 28;
7278
7279 /* M variants do not implement ARM mode; this must raise the INVSTATE
7280 * UsageFault exception.
7281 */
7282 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7283 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
7284 return;
7285 }
7286
7287 if (s->pstate_il) {
7288 /*
7289 * Illegal execution state. This has priority over BTI
7290 * exceptions, but comes after instruction abort exceptions.
7291 */
7292 gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
7293 return;
7294 }
7295
7296 if (cond == 0xf) {
7297 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7298 * choose to UNDEF. In ARMv5 and above the space is used
7299 * for miscellaneous unconditional instructions.
7300 */
7301 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
7302 unallocated_encoding(s);
7303 return;
7304 }
7305
7306 /* Unconditional instructions. */
7307 /* TODO: Perhaps merge these into one decodetree output file. */
7308 if (disas_a32_uncond(s, insn) ||
7309 disas_vfp_uncond(s, insn) ||
7310 disas_neon_dp(s, insn) ||
7311 disas_neon_ls(s, insn) ||
7312 disas_neon_shared(s, insn)) {
7313 return;
7314 }
7315 /* fall back to legacy decoder */
7316
7317 if ((insn & 0x0e000f00) == 0x0c000100) {
7318 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7319 /* iWMMXt register transfer. */
7320 if (extract32(s->c15_cpar, 1, 1)) {
7321 if (!disas_iwmmxt_insn(s, insn)) {
7322 return;
7323 }
7324 }
7325 }
7326 }
7327 goto illegal_op;
7328 }
7329 if (cond != 0xe) {
7330 /* if not always execute, we generate a conditional jump to
7331 next instruction */
7332 arm_skip_unless(s, cond);
7333 }
7334
7335 /* TODO: Perhaps merge these into one decodetree output file. */
7336 if (disas_a32(s, insn) ||
7337 disas_vfp(s, insn)) {
7338 return;
7339 }
7340 /* fall back to legacy decoder */
7341 /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
7342 if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7343 if (((insn & 0x0c000e00) == 0x0c000000)
7344 && ((insn & 0x03000000) != 0x03000000)) {
7345 /* Coprocessor insn, coprocessor 0 or 1 */
7346 disas_xscale_insn(s, insn);
7347 return;
7348 }
7349 }
7350
7351 illegal_op:
7352 unallocated_encoding(s);
7353 }
7354
7355 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
7356 {
7357 /*
7358 * Return true if this is a 16 bit instruction. We must be precise
7359 * about this (matching the decode).
7360 */
7361 if ((insn >> 11) < 0x1d) {
7362 /* Definitely a 16-bit instruction */
7363 return true;
7364 }
7365
7366 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
7367 * first half of a 32-bit Thumb insn. Thumb-1 cores might
7368 * end up actually treating this as two 16-bit insns, though,
7369 * if it's half of a bl/blx pair that might span a page boundary.
7370 */
7371 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
7372 arm_dc_feature(s, ARM_FEATURE_M)) {
7373 /* Thumb2 cores (including all M profile ones) always treat
7374 * 32-bit insns as 32-bit.
7375 */
7376 return false;
7377 }
7378
7379 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
7380 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
7381 * is not on the next page; we merge this into a 32-bit
7382 * insn.
7383 */
7384 return false;
7385 }
7386 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
7387 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
7388 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
7389 * -- handle as single 16 bit insn
7390 */
7391 return true;
7392 }
7393
7394 /* Translate a 32-bit thumb instruction. */
7395 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
7396 {
7397 /*
7398 * ARMv6-M supports a limited subset of Thumb2 instructions.
7399 * Other Thumb1 architectures allow only 32-bit
7400 * combined BL/BLX prefix and suffix.
7401 */
7402 if (arm_dc_feature(s, ARM_FEATURE_M) &&
7403 !arm_dc_feature(s, ARM_FEATURE_V7)) {
7404 int i;
7405 bool found = false;
7406 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
7407 0xf3b08040 /* dsb */,
7408 0xf3b08050 /* dmb */,
7409 0xf3b08060 /* isb */,
7410 0xf3e08000 /* mrs */,
7411 0xf000d000 /* bl */};
7412 static const uint32_t armv6m_mask[] = {0xffe0d000,
7413 0xfff0d0f0,
7414 0xfff0d0f0,
7415 0xfff0d0f0,
7416 0xffe0d000,
7417 0xf800d000};
7418
7419 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
7420 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
7421 found = true;
7422 break;
7423 }
7424 }
7425 if (!found) {
7426 goto illegal_op;
7427 }
7428 } else if ((insn & 0xf800e800) != 0xf000e800) {
7429 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
7430 unallocated_encoding(s);
7431 return;
7432 }
7433 }
7434
7435 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7436 /*
7437 * NOCP takes precedence over any UNDEF for (almost) the
7438 * entire wide range of coprocessor-space encodings, so check
7439 * for it first before proceeding to actually decode eg VFP
7440 * insns. This decode also handles the few insns which are
7441 * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
7442 */
7443 if (disas_m_nocp(s, insn)) {
7444 return;
7445 }
7446 }
7447
7448 if ((insn & 0xef000000) == 0xef000000) {
7449 /*
7450 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7451 * transform into
7452 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7453 */
7454 uint32_t a32_insn = (insn & 0xe2ffffff) |
7455 ((insn & (1 << 28)) >> 4) | (1 << 28);
7456
7457 if (disas_neon_dp(s, a32_insn)) {
7458 return;
7459 }
7460 }
7461
7462 if ((insn & 0xff100000) == 0xf9000000) {
7463 /*
7464 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7465 * transform into
7466 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
7467 */
7468 uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
7469
7470 if (disas_neon_ls(s, a32_insn)) {
7471 return;
7472 }
7473 }
7474
7475 /*
7476 * TODO: Perhaps merge these into one decodetree output file.
7477 * Note disas_vfp is written for a32 with cond field in the
7478 * top nibble. The t32 encoding requires 0xe in the top nibble.
7479 */
7480 if (disas_t32(s, insn) ||
7481 disas_vfp_uncond(s, insn) ||
7482 disas_neon_shared(s, insn) ||
7483 disas_mve(s, insn) ||
7484 ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
7485 return;
7486 }
7487
7488 illegal_op:
7489 unallocated_encoding(s);
7490 }
7491
7492 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
7493 {
7494 if (!disas_t16(s, insn)) {
7495 unallocated_encoding(s);
7496 }
7497 }
7498
7499 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
7500 {
7501 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
7502 * (False positives are OK, false negatives are not.)
7503 * We know this is a Thumb insn, and our caller ensures we are
7504 * only called if dc->base.pc_next is less than 4 bytes from the page
7505 * boundary, so we cross the page if the first 16 bits indicate
7506 * that this is a 32 bit insn.
7507 */
7508 uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
7509
7510 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
7511 }
7512
7513 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
7514 {
7515 DisasContext *dc = container_of(dcbase, DisasContext, base);
7516 CPUARMState *env = cpu_env(cs);
7517 ARMCPU *cpu = env_archcpu(env);
7518 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
7519 uint32_t condexec, core_mmu_idx;
7520
7521 dc->isar = &cpu->isar;
7522 dc->condjmp = 0;
7523 dc->pc_save = dc->base.pc_first;
7524 dc->aarch64 = false;
7525 dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
7526 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
7527 condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
7528 /*
7529 * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this
7530 * is always the IT bits. On M-profile, some of the reserved encodings
7531 * of IT are used instead to indicate either ICI or ECI, which
7532 * indicate partial progress of a restartable insn that was interrupted
7533 * partway through by an exception:
7534 * * if CONDEXEC[3:0] != 0b0000 : CONDEXEC is IT bits
7535 * * if CONDEXEC[3:0] == 0b0000 : CONDEXEC is ICI or ECI bits
7536 * In all cases CONDEXEC == 0 means "not in IT block or restartable
7537 * insn, behave normally".
7538 */
7539 dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
7540 dc->eci_handled = false;
7541 if (condexec & 0xf) {
7542 dc->condexec_mask = (condexec & 0xf) << 1;
7543 dc->condexec_cond = condexec >> 4;
7544 } else {
7545 if (arm_feature(env, ARM_FEATURE_M)) {
7546 dc->eci = condexec >> 4;
7547 }
7548 }
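/*
 * Arithmetic example of the split above (values are illustrative):
 * condexec == 0xa4 gives condexec_cond == 0xa and condexec_mask == 0x08
 * (0x4 << 1); condexec == 0x30 has a zero low nibble, so on M-profile it
 * instead becomes eci == 0x3.
 */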
7549
7550 core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
7551 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
7552 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
7553 #if !defined(CONFIG_USER_ONLY)
7554 dc->user = (dc->current_el == 0);
7555 #endif
7556 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
7557 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
7558 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
7559 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
7560 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
7561
7562 if (arm_feature(env, ARM_FEATURE_M)) {
7563 dc->vfp_enabled = 1;
7564 dc->be_data = MO_TE;
7565 dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
7566 dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
7567 dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
7568 dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
7569 dc->v7m_new_fp_ctxt_needed =
7570 EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
7571 dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
7572 dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
7573 } else {
7574 dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
7575 dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
7576 dc->ns = EX_TBFLAG_A32(tb_flags, NS);
7577 dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
7578 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7579 dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
7580 } else {
7581 dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
7582 dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
7583 }
7584 dc->sme_trap_nonstreaming =
7585 EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
7586 }
7587 dc->lse2 = false; /* applies only to aarch64 */
7588 dc->cp_regs = cpu->cp_regs;
7589 dc->features = env->features;
7590
7591 /* Single step state. The code-generation logic here is:
7592 * SS_ACTIVE == 0:
7593 * generate code with no special handling for single-stepping (except
7594 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
7595 * this happens anyway because those changes are all system register or
7596 * PSTATE writes).
7597 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
7598 * emit code for one insn
7599 * emit code to clear PSTATE.SS
7600 * emit code to generate software step exception for completed step
7601 * end TB (as usual for having generated an exception)
7602 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
7603 * emit code to generate a software step exception
7604 * end the TB
7605 */
7606 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
7607 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
7608 dc->is_ldex = false;
7609
7610 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
7611
7612 /* If architectural single step active, limit to 1. */
7613 if (dc->ss_active) {
7614 dc->base.max_insns = 1;
7615 }
7616
7617 /* ARM is a fixed-length ISA. Bound the number of insns to execute
7618 to those left on the page. */
7619 if (!dc->thumb) {
7620 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
7621 dc->base.max_insns = MIN(dc->base.max_insns, bound);
7622 }
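/*
 * Example of the bound computed above, assuming 4 KiB target pages: with
 * pc_first == 0x80000f80, (pc_first | TARGET_PAGE_MASK) is ...ffffff80,
 * so its negation is 0x80 (128 bytes left on the page) and the bound is
 * 32 A32 insns.
 */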
7623
7624 cpu_V0 = tcg_temp_new_i64();
7625 cpu_V1 = tcg_temp_new_i64();
7626 cpu_M0 = tcg_temp_new_i64();
7627 }
7628
7629 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
7630 {
7631 DisasContext *dc = container_of(dcbase, DisasContext, base);
7632
7633 /* A note on handling of the condexec (IT) bits:
7634 *
7635 * We want to avoid the overhead of having to write the updated condexec
7636 * bits back to the CPUARMState for every instruction in an IT block. So:
7637 * (1) if the condexec bits are not already zero then we write
7638 * zero back into the CPUARMState now. This avoids complications trying
7639 * to do it at the end of the block. (For example if we don't do this
7640 * it's hard to identify whether we can safely skip writing condexec
7641 * at the end of the TB, which we definitely want to do for the case
7642 * where a TB doesn't do anything with the IT state at all.)
7643 * (2) if we are going to leave the TB then we call gen_set_condexec()
7644 * which will write the correct value into CPUARMState if zero is wrong.
7645 * This is done both for leaving the TB at the end, and for leaving
7646 * it because of an exception we know will happen, which is done in
7647 * gen_exception_insn(). The latter is necessary because we need to
7648 * leave the TB with the PC/IT state just prior to execution of the
7649 * instruction which caused the exception.
7650 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
7651 * then the CPUARMState will be wrong and we need to reset it.
7652 * This is handled in the same way as restoration of the
7653 * PC in these situations; we save the value of the condexec bits
7654 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
7655 * then uses this to restore them after an exception.
7656 *
7657 * Note that there are no instructions which can read the condexec
7658 * bits, and none which can write non-static values to them, so
7659 * we don't need to care about whether CPUARMState is correct in the
7660 * middle of a TB.
7661 */
7662
7663 /* Reset the conditional execution bits immediately. This avoids
7664 complications trying to do it at the end of the block. */
7665 if (dc->condexec_mask || dc->condexec_cond) {
7666 store_cpu_field_constant(0, condexec_bits);
7667 }
7668 }
7669
7670 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
7671 {
7672 DisasContext *dc = container_of(dcbase, DisasContext, base);
7673 /*
7674 * The ECI/ICI bits share PSR bits with the IT bits, so we
7675 * need to reconstitute the bits from the split-out DisasContext
7676 * fields here.
7677 */
7678 uint32_t condexec_bits;
7679 target_ulong pc_arg = dc->base.pc_next;
7680
7681 if (tb_cflags(dcbase->tb) & CF_PCREL) {
7682 pc_arg &= ~TARGET_PAGE_MASK;
7683 }
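/*
 * e.g. (illustrative, assuming 4 KiB target pages): with CF_PCREL, a
 * pc_next of 0x00012a34 is recorded as pc_arg == 0xa34, i.e. only the
 * offset within the page is stored in the insn_start op.
 */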
7684 if (dc->eci) {
7685 condexec_bits = dc->eci << 4;
7686 } else {
7687 condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
7688 }
7689 tcg_gen_insn_start(pc_arg, condexec_bits, 0);
7690 dc->insn_start_updated = false;
7691 }
7692
7693 static bool arm_check_kernelpage(DisasContext *dc)
7694 {
7695 #ifdef CONFIG_USER_ONLY
7696 /* Intercept jump to the magic kernel page. */
7697 if (dc->base.pc_next >= 0xffff0000) {
7698 /* We always get here via a jump, so we know we are not in a
7699 conditional execution block. */
7700 gen_exception_internal(EXCP_KERNEL_TRAP);
7701 dc->base.is_jmp = DISAS_NORETURN;
7702 return true;
7703 }
7704 #endif
7705 return false;
7706 }
7707
7708 static bool arm_check_ss_active(DisasContext *dc)
7709 {
7710 if (dc->ss_active && !dc->pstate_ss) {
7711 /* Singlestep state is Active-pending.
7712 * If we're in this state at the start of a TB then either
7713 * a) we just took an exception to an EL which is being debugged
7714 * and this is the first insn in the exception handler
7715 * b) debug exceptions were masked and we just unmasked them
7716 * without changing EL (eg by clearing PSTATE.D)
7717 * In either case we're going to take a swstep exception in the
7718 * "did not step an insn" case, and so the syndrome ISV and EX
7719 * bits should be zero.
7720 */
7721 assert(dc->base.num_insns == 1);
7722 gen_swstep_exception(dc, 0, 0);
7723 dc->base.is_jmp = DISAS_NORETURN;
7724 return true;
7725 }
7726
7727 return false;
7728 }
7729
7730 static void arm_post_translate_insn(DisasContext *dc)
7731 {
7732 if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
7733 if (dc->pc_save != dc->condlabel.pc_save) {
7734 gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
7735 }
7736 gen_set_label(dc->condlabel.label);
7737 dc->condjmp = 0;
7738 }
7739 }
7740
7741 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7742 {
7743 DisasContext *dc = container_of(dcbase, DisasContext, base);
7744 CPUARMState *env = cpu_env(cpu);
7745 uint32_t pc = dc->base.pc_next;
7746 unsigned int insn;
7747
7748 /* Singlestep exceptions have the highest priority. */
7749 if (arm_check_ss_active(dc)) {
7750 dc->base.pc_next = pc + 4;
7751 return;
7752 }
7753
7754 if (pc & 3) {
7755 /*
7756 * PC alignment fault. This has priority over the instruction abort
7757 * that we would receive from a translation fault via arm_ldl_code
7758 * (or the execution of the kernelpage entrypoint). This should only
7759 * be possible after an indirect branch, at the start of the TB.
7760 */
7761 assert(dc->base.num_insns == 1);
7762 gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
7763 dc->base.is_jmp = DISAS_NORETURN;
7764 dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
7765 return;
7766 }
7767
7768 if (arm_check_kernelpage(dc)) {
7769 dc->base.pc_next = pc + 4;
7770 return;
7771 }
7772
7773 dc->pc_curr = pc;
7774 insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b);
7775 dc->insn = insn;
7776 dc->base.pc_next = pc + 4;
7777 disas_arm_insn(dc, insn);
7778
7779 arm_post_translate_insn(dc);
7780
7781 /* ARM is a fixed-length ISA. We performed the cross-page check
7782 in init_disas_context by adjusting max_insns. */
7783 }
7784
7785 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
7786 {
7787 /* Return true if this Thumb insn is always unconditional,
7788 * even inside an IT block. This is true of only a very few
7789 * instructions: BKPT, HLT, and SG.
7790 *
7791 * A larger class of instructions are UNPREDICTABLE if used
7792 * inside an IT block; we do not need to detect those here, because
7793 * what we do by default (perform the cc check and update the IT
7794 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
7795 * choice for those situations.
7796 *
7797 * insn is either a 16-bit or a 32-bit instruction; the two are
7798 * distinguishable because for the 16-bit case the top 16 bits
7799 * are zeroes, and that isn't a valid 32-bit encoding.
7800 */
7801 if ((insn & 0xffffff00) == 0xbe00) {
7802 /* BKPT */
7803 return true;
7804 }
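/*
 * Illustrative note: the test above covers every 16-bit BKPT encoding
 * 0xbe00..0xbeff (BKPT #imm8), and cannot match a 32-bit insn because
 * those always arrive here with a non-zero top halfword.
 */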
7805
7806 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
7807 !arm_dc_feature(s, ARM_FEATURE_M)) {
7808 /* HLT: v8A only. This is unconditional even when it is going to
7809 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
7810 * For v7 cores this was a plain old undefined encoding and so
7811 * honours its cc check. (We might be using the encoding as
7812 * a semihosting trap, but we don't change the cc check behaviour
7813 * on that account, because a debugger connected to a real v7A
7814 * core and emulating semihosting traps by catching the UNDEF
7815 * exception would also only see cases where the cc check passed.
7816 * No guest code should be trying to do a HLT semihosting trap
7817 * in an IT block anyway.
7818 */
7819 return true;
7820 }
7821
7822 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
7823 arm_dc_feature(s, ARM_FEATURE_M)) {
7824 /* SG: v8M only */
7825 return true;
7826 }
7827
7828 return false;
7829 }
7830
7831 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7832 {
7833 DisasContext *dc = container_of(dcbase, DisasContext, base);
7834 CPUARMState *env = cpu_env(cpu);
7835 uint32_t pc = dc->base.pc_next;
7836 uint32_t insn;
7837 bool is_16bit;
7838 /* TCG op to rewind to if this turns out to be an invalid ECI state */
7839 TCGOp *insn_eci_rewind = NULL;
7840 target_ulong insn_eci_pc_save = -1;
7841
7842 /* Misaligned thumb PC is architecturally impossible. */
7843 assert((dc->base.pc_next & 1) == 0);
7844
7845 if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
7846 dc->base.pc_next = pc + 2;
7847 return;
7848 }
7849
7850 dc->pc_curr = pc;
7851 insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7852 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
7853 pc += 2;
7854 if (!is_16bit) {
7855 uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
7856 insn = insn << 16 | insn2;
7857 pc += 2;
7858 }
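/*
 * For illustration: a BL with zero offset is the halfword pair 0xf000,
 * 0xf800, so insn becomes 0xf000f800 here, which matches the BL/BLX
 * pattern tested by (insn & 0xf800e800) == 0xf000e800 in
 * disas_thumb2_insn().
 */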
7859 dc->base.pc_next = pc;
7860 dc->insn = insn;
7861
7862 if (dc->pstate_il) {
7863 /*
7864 * Illegal execution state. This has priority over BTI
7865 * exceptions, but comes after instruction abort exceptions.
7866 */
7867 gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
7868 return;
7869 }
7870
7871 if (dc->eci) {
7872 /*
7873 * For M-profile continuable instructions, ECI/ICI handling
7874 * falls into these cases:
7875 * - interrupt-continuable instructions
7876 * These are the various load/store multiple insns (both
7877 * integer and fp). The ICI bits indicate the register
7878 * where the load/store can resume. We make the IMPDEF
7879 * choice to always do "instruction restart", ie ignore
7880 * the ICI value and always execute the ldm/stm from the
7881 * start. So all we need to do is zero PSR.ICI if the
7882 * insn executes.
7883 * - MVE instructions subject to beat-wise execution
7884 * Here the ECI bits indicate which beats have already been
7885 * executed, and we must honour this. Each insn of this
7886 * type will handle it correctly. We will update PSR.ECI
7887 * in the helper function for the insn (some ECI values
7888 * mean that the following insn also has been partially
7889 * executed).
7890 * - Special cases which don't advance ECI
7891 * The insns LE, LETP and BKPT leave the ECI/ICI state
7892 * bits untouched.
7893 * - all other insns (the common case)
7894 * Non-zero ECI/ICI means an INVSTATE UsageFault.
7895 * We place a rewind-marker here. Insns in the previous
7896 * three categories will set a flag in the DisasContext.
7897 * If the flag isn't set after we call disas_thumb_insn()
7898 * or disas_thumb2_insn() then we know we have a "some other
7899 * insn" case. We will rewind to the marker (ie throwing away
7900 * all the generated code) and instead emit "take exception".
7901 */
7902 insn_eci_rewind = tcg_last_op();
7903 insn_eci_pc_save = dc->pc_save;
7904 }
7905
7906 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
7907 uint32_t cond = dc->condexec_cond;
7908
7909 /*
7910 * Conditionally skip the insn. Note that both 0xe and 0xf mean
7911 * "always"; 0xf is not "never".
7912 */
7913 if (cond < 0x0e) {
7914 arm_skip_unless(dc, cond);
7915 }
7916 }
7917
7918 if (is_16bit) {
7919 disas_thumb_insn(dc, insn);
7920 } else {
7921 disas_thumb2_insn(dc, insn);
7922 }
7923
7924 /* Advance the Thumb condexec condition. */
7925 if (dc->condexec_mask) {
7926 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
7927 ((dc->condexec_mask >> 4) & 1));
7928 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7929 if (dc->condexec_mask == 0) {
7930 dc->condexec_cond = 0;
7931 }
7932 }
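/*
 * Field arithmetic example of the advance above (illustrative values):
 * with condexec_cond == 0x4 and condexec_mask == 0x18, this step yields
 * cond 0x5 / mask 0x10; the next step yields cond 0x5 / mask 0x00, at
 * which point condexec_cond is cleared and the IT block is finished.
 */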
7933
7934 if (dc->eci && !dc->eci_handled) {
7935 /*
7936 * Insn wasn't valid for ECI/ICI at all: undo what we
7937 * just generated and instead emit an exception
7938 */
7939 tcg_remove_ops_after(insn_eci_rewind);
7940 dc->pc_save = insn_eci_pc_save;
7941 dc->condjmp = 0;
7942 gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
7943 }
7944
7945 arm_post_translate_insn(dc);
7946
7947 /* Thumb is a variable-length ISA. Stop translation when the next insn
7948 * will touch a new page. This ensures that prefetch aborts occur at
7949 * the right place.
7950 *
7951 * We want to stop the TB if the next insn starts in a new page,
7952 * or if it spans between this page and the next. This means that
7953 * if we're looking at the last halfword in the page we need to
7954 * see if it's a 16-bit Thumb insn (which will fit in this TB)
7955 * or a 32-bit Thumb insn (which won't).
7956 * This is to avoid generating a silly TB with a single 16-bit insn
7957 * in it at the end of this page (which would execute correctly
7958 * but isn't very efficient).
7959 */
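/*
 * For example, assuming 4 KiB target pages: at a page offset of 0xffc or
 * less we just keep going; at 0xffe (the last halfword) we peek at the
 * halfword and stop only if it is the first half of a 32-bit insn; at
 * 0x1000 or more we have already left the page and stop unconditionally.
 */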
7960 if (dc->base.is_jmp == DISAS_NEXT
7961 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
7962 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
7963 && insn_crosses_page(env, dc)))) {
7964 dc->base.is_jmp = DISAS_TOO_MANY;
7965 }
7966 }
7967
7968 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7969 {
7970 DisasContext *dc = container_of(dcbase, DisasContext, base);
7971
7972 /* At this stage dc->condjmp will only be set when the skipped
7973 instruction was a conditional branch or trap, and the PC has
7974 already been written. */
7975 gen_set_condexec(dc);
7976 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
7977 /* Exception return branches need some special case code at the
7978 * end of the TB, which is complex enough that it has to
7979 * handle the single-step vs not and the condition-failed
7980 * insn codepath itself.
7981 */
7982 gen_bx_excret_final_code(dc);
7983 } else if (unlikely(dc->ss_active)) {
7984 /* Unconditional and "condition passed" instruction codepath. */
7985 switch (dc->base.is_jmp) {
7986 case DISAS_SWI:
7987 gen_ss_advance(dc);
7988 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
7989 break;
7990 case DISAS_HVC:
7991 gen_ss_advance(dc);
7992 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7993 break;
7994 case DISAS_SMC:
7995 gen_ss_advance(dc);
7996 gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
7997 break;
7998 case DISAS_NEXT:
7999 case DISAS_TOO_MANY:
8000 case DISAS_UPDATE_EXIT:
8001 case DISAS_UPDATE_NOCHAIN:
8002 gen_update_pc(dc, curr_insn_len(dc));
8003 /* fall through */
8004 default:
8005 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8006 gen_singlestep_exception(dc);
8007 break;
8008 case DISAS_NORETURN:
8009 break;
8010 }
8011 } else {
8012 /* While branches must always occur at the end of an IT block,
8013 there are a few other things that can cause us to terminate
8014 the TB in the middle of an IT block:
8015 - Exception generating instructions (bkpt, swi, undefined).
8016 - Page boundaries.
8017 - Hardware watchpoints.
8018 Hardware breakpoints have already been handled and skip this code.
8019 */
8020 switch (dc->base.is_jmp) {
8021 case DISAS_NEXT:
8022 case DISAS_TOO_MANY:
8023 gen_goto_tb(dc, 1, curr_insn_len(dc));
8024 break;
8025 case DISAS_UPDATE_NOCHAIN:
8026 gen_update_pc(dc, curr_insn_len(dc));
8027 /* fall through */
8028 case DISAS_JUMP:
8029 gen_goto_ptr();
8030 break;
8031 case DISAS_UPDATE_EXIT:
8032 gen_update_pc(dc, curr_insn_len(dc));
8033 /* fall through */
8034 default:
8035 /* indicate that the hash table must be used to find the next TB */
8036 tcg_gen_exit_tb(NULL, 0);
8037 break;
8038 case DISAS_NORETURN:
8039 /* nothing more to generate */
8040 break;
8041 case DISAS_WFI:
8042 gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
8043 /*
8044 * The helper doesn't necessarily throw an exception, but we
8045 * must go back to the main loop to check for interrupts anyway.
8046 */
8047 tcg_gen_exit_tb(NULL, 0);
8048 break;
8049 case DISAS_WFE:
8050 gen_helper_wfe(tcg_env);
8051 break;
8052 case DISAS_YIELD:
8053 gen_helper_yield(tcg_env);
8054 break;
8055 case DISAS_SWI:
8056 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
8057 break;
8058 case DISAS_HVC:
8059 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8060 break;
8061 case DISAS_SMC:
8062 gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
8063 break;
8064 }
8065 }
8066
8067 if (dc->condjmp) {
8068 /* "Condition failed" instruction codepath for the branch/trap insn */
8069 set_disas_label(dc, dc->condlabel);
8070 gen_set_condexec(dc);
8071 if (unlikely(dc->ss_active)) {
8072 gen_update_pc(dc, curr_insn_len(dc));
8073 gen_singlestep_exception(dc);
8074 } else {
8075 gen_goto_tb(dc, 1, curr_insn_len(dc));
8076 }
8077 }
8078 }
8079
8080 static const TranslatorOps arm_translator_ops = {
8081 .init_disas_context = arm_tr_init_disas_context,
8082 .tb_start = arm_tr_tb_start,
8083 .insn_start = arm_tr_insn_start,
8084 .translate_insn = arm_tr_translate_insn,
8085 .tb_stop = arm_tr_tb_stop,
8086 };
8087
8088 static const TranslatorOps thumb_translator_ops = {
8089 .init_disas_context = arm_tr_init_disas_context,
8090 .tb_start = arm_tr_tb_start,
8091 .insn_start = arm_tr_insn_start,
8092 .translate_insn = thumb_tr_translate_insn,
8093 .tb_stop = arm_tr_tb_stop,
8094 };
8095
8096 /* generate intermediate code for basic block 'tb'. */
8097 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
8098 vaddr pc, void *host_pc)
8099 {
8100 DisasContext dc = { };
8101 const TranslatorOps *ops = &arm_translator_ops;
8102 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);
8103
8104 if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
8105 ops = &thumb_translator_ops;
8106 }
8107 #ifdef TARGET_AARCH64
8108 if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
8109 ops = &aarch64_translator_ops;
8110 }
8111 #endif
8112
8113 translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
8114 }
8115