/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "semihosting/semihost.h"
#include "cpregs.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/*
 * Helpers for extracting complex instruction fields
 */

/*
 * For load/store with an unsigned 12 bit immediate scaled by the element
 * size. The input has the immediate field in bits [14:3] and the element
 * size in [2:0].
 */
static int uimm_scaled(DisasContext *s, int x)
{
    unsigned imm = x >> 3;
    unsigned scale = extract32(x, 0, 3);
    return imm << scale;
}
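
/*
 * Worked example: for LDR Xt, [Xn, #32] the decode tree packs
 * imm12 = 4 and size = 3 into x = (4 << 3) | 3, so uimm_scaled()
 * returns 4 << 3 = 32, the byte offset of the access.
 */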

/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
static int scale_by_log2_tag_granule(DisasContext *s, int x)
{
    return x << LOG2_TAG_GRANULE;
}
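
/*
 * The MTE tag granule is 16 bytes, so LOG2_TAG_GRANULE is 4 and an
 * encoded offset field of e.g. 2 becomes a byte offset of 32.
 */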

/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
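
/*
 * For example, LDTR executed at EL1 with s->mmu_idx == ARMMMUIdx_E10_1
 * is translated using the ARMMMUIdx_E10_0 (EL0) regime, while the same
 * insn at EL3 behaves like a normal load and uses get_mem_index(s).
 */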

static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}
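
/*
 * With CF_PCREL, cpu_pc is known to hold pc_save, so the relative form
 * computes dest = pc_save + (pc_curr - pc_save) + diff = pc_curr + diff,
 * the same result as the absolute computation in the else branch.
 */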

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
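
/*
 * To see why the AND/OR trick works, note that after the sextract
 * dst[63:56] is either all-zeros (bit 55 clear) or all-ones (bit 55
 * set). ANDing with src therefore keeps src's tag byte only when
 * bit 55 is set (the TBI1 half, which tbi == 1 does not cover), and
 * ORing keeps it only when bit 55 is clear (the TBI0 half, which
 * tbi == 2 does not cover).
 */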

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(tcg_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access. This probes a single
 * address, the exact one specified. The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp single_mop)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
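
/*
 * Note the asymmetry with gen_mte_check1: here MTEDESC.SIZEM1 covers
 * the total byte count of the whole sequence, while MTEDESC.ALIGN
 * still describes the alignment of each single access.
 */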

/*
 * Generate the special alignment check that applies to AccType_ATOMIC
 * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
 * naturally aligned, but it must not cross a 16-byte boundary.
 * See AArch64.CheckAlignment().
 */
static void check_lse2_align(DisasContext *s, int rn, int imm,
                             bool is_write, MemOp mop)
{
    TCGv_i32 tmp;
    TCGv_i64 addr;
    TCGLabel *over_label;
    MMUAccessType type;
    int mmu_idx;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
    tcg_gen_addi_i32(tmp, tmp, imm & 15);
    tcg_gen_andi_i32(tmp, tmp, 15);
    tcg_gen_addi_i32(tmp, tmp, memop_size(mop));

    over_label = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);
    type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
    mmu_idx = get_mem_index(s);
    gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
                                tcg_constant_i32(mmu_idx));

    gen_set_label(over_label);
}
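
/*
 * Worked example: an 8-byte access whose (address + imm) is 12 mod 16
 * gives tmp = 12 + 8 = 20 > 16, so the branch falls through and
 * gen_helper_unaligned_access raises the fault; at 8 mod 16 the access
 * ends exactly on the boundary (tmp == 16) and is allowed.
 */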

/* Handle the alignment check for AccType_ATOMIC instructions. */
static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }

    /*
     * If size == MO_128, this is a LDXP, and the operation is single-copy
     * atomic for each doubleword, not the entire quadword; it still must
     * be quadword aligned.
     */
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (dc_isar_feature(aa64_lse2, s)) {
        check_lse2_align(s, rn, 0, true, mop);
    } else {
        mop |= MO_ALIGN;
    }
    return finalize_memop(s, mop);
}

/* Handle the alignment check for AccType_ORDERED instructions. */
static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
                                 bool is_write, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (!dc_isar_feature(aa64_lse2, s)) {
        mop |= MO_ALIGN;
    } else if (!s->naa) {
        check_lse2_align(s, rn, imm, is_write, mop);
    }
    return finalize_memop(s, mop);
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments. For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path. A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}
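
/*
 * E.g. "ADD X0, X1, XZR" reads register 31 via cpu_reg() and gets a
 * zeroed temporary, whereas "ADD X0, SP, #16" forms its address with
 * cpu_reg_sp() below and really reads cpu_X[31].
 */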

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that, as with the GP register accessors, the read functions
 * return fresh auto-freed temporaries which may be freely modified.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
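
/*
 * This relies on the gvec contract that bytes between the operation
 * size (8 or 16 here) and the maximum vector size are zeroed, so a
 * same-offset move of the low doubleword(s) clears the SVE tail.
 */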

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, tcg_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
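
/*
 * After the extract, cpu_NF holds the high 32 bits (so NF bit 31 is
 * the sign bit of the 64-bit result) and cpu_ZF the low 32 bits; the
 * OR makes ZF zero exactly when the whole 64-bit result is zero.
 */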

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}
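
/*
 * The overflow computation is the usual (result ^ t0) & ~(t0 ^ t1):
 * signed overflow occurred iff the operands had the same sign and the
 * result's sign differs, and extrh places bit 63 into VF bit 31.
 */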

static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();

    tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64));

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i64 tmphi = tcg_temp_new_i64();
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_concat_i64_i128(t16, tmplo, tmphi);

        tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);

        tmphi = tcg_temp_new_i64();
        tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
    }

    tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority. This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}

/*
 * Expanders for AdvSIMD translation functions.
 */

static bool do_gvec_op2_ool(DisasContext *s, arg_qrr_e *a, int data,
                            gen_helper_gvec_2 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op2_ool(s, a->q, a->rd, a->rn, data, fn);
    }
    return true;
}

static bool do_gvec_op3_ool(DisasContext *s, arg_qrrr_e *a, int data,
                            gen_helper_gvec_3 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, data, fn);
    }
    return true;
}

static bool do_gvec_fn3(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no8_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_8) {
        return false;
    }
    return do_gvec_fn3_no64(s, a, fn);
}

static bool do_gvec_fn4(DisasContext *s, arg_qrrrr_e *a, GVecGen4Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn4(s, a->q, a->rd, a->rn, a->rm, a->ra, fn, a->esz);
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0));
    tcg_gen_shli_i64(tcg_out, tcg_out, shift);
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0).
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
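
/*
 * Illustrative (hypothetical) table and call site; the pattern/mask
 * values and handler name below are made up for the example:
 *
 *     static const AArch64DecodeTable tbl[] = {
 *         { 0x0e200400, 0x9f200400, disas_three_reg_same },
 *         { 0x00000000, 0x00000000, NULL },    <-- terminator: empty mask
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(tbl, insn);
 *     if (fn) {
 *         fn(s, insn);
 *     }
 */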
1424
1425 /*
1426 * The instruction disassembly implemented here matches
1427 * the instruction encoding classifications in chapter C4
1428 * of the ARM Architecture Reference Manual (DDI0487B_a);
1429 * classification names and decode diagrams here should generally
1430 * match up with those in the manual.
1431 */
1432
trans_B(DisasContext * s,arg_i * a)1433 static bool trans_B(DisasContext *s, arg_i *a)
1434 {
1435 reset_btype(s);
1436 gen_goto_tb(s, 0, a->imm);
1437 return true;
1438 }
1439
trans_BL(DisasContext * s,arg_i * a)1440 static bool trans_BL(DisasContext *s, arg_i *a)
1441 {
1442 gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
1443 reset_btype(s);
1444 gen_goto_tb(s, 0, a->imm);
1445 return true;
1446 }
1447
1448
trans_CBZ(DisasContext * s,arg_cbz * a)1449 static bool trans_CBZ(DisasContext *s, arg_cbz *a)
1450 {
1451 DisasLabel match;
1452 TCGv_i64 tcg_cmp;
1453
1454 tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
1455 reset_btype(s);
1456
1457 match = gen_disas_label(s);
1458 tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1459 tcg_cmp, 0, match.label);
1460 gen_goto_tb(s, 0, 4);
1461 set_disas_label(s, match);
1462 gen_goto_tb(s, 1, a->imm);
1463 return true;
1464 }
1465
trans_TBZ(DisasContext * s,arg_tbz * a)1466 static bool trans_TBZ(DisasContext *s, arg_tbz *a)
1467 {
1468 DisasLabel match;
1469 TCGv_i64 tcg_cmp;
1470
1471 tcg_cmp = tcg_temp_new_i64();
1472 tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);
1473
1474 reset_btype(s);
1475
1476 match = gen_disas_label(s);
1477 tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1478 tcg_cmp, 0, match.label);
1479 gen_goto_tb(s, 0, 4);
1480 set_disas_label(s, match);
1481 gen_goto_tb(s, 1, a->imm);
1482 return true;
1483 }
1484
trans_B_cond(DisasContext * s,arg_B_cond * a)1485 static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
1486 {
1487 /* BC.cond is only present with FEAT_HBC */
1488 if (a->c && !dc_isar_feature(aa64_hbc, s)) {
1489 return false;
1490 }
1491 reset_btype(s);
1492 if (a->cond < 0x0e) {
1493 /* genuinely conditional branches */
1494 DisasLabel match = gen_disas_label(s);
1495 arm_gen_test_cc(a->cond, match.label);
1496 gen_goto_tb(s, 0, 4);
1497 set_disas_label(s, match);
1498 gen_goto_tb(s, 1, a->imm);
1499 } else {
1500 /* 0xe and 0xf are both "always" conditions */
1501 gen_goto_tb(s, 0, a->imm);
1502 }
1503 return true;
1504 }
1505
set_btype_for_br(DisasContext * s,int rn)1506 static void set_btype_for_br(DisasContext *s, int rn)
1507 {
1508 if (dc_isar_feature(aa64_bti, s)) {
1509 /* BR to {x16,x17} or !guard -> 1, else 3. */
1510 set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
1511 }
1512 }
1513
set_btype_for_blr(DisasContext * s)1514 static void set_btype_for_blr(DisasContext *s)
1515 {
1516 if (dc_isar_feature(aa64_bti, s)) {
1517 /* BLR sets BTYPE to 2, regardless of source guarded page. */
1518 set_btype(s, 2);
1519 }
1520 }
1521
trans_BR(DisasContext * s,arg_r * a)1522 static bool trans_BR(DisasContext *s, arg_r *a)
1523 {
1524 gen_a64_set_pc(s, cpu_reg(s, a->rn));
1525 set_btype_for_br(s, a->rn);
1526 s->base.is_jmp = DISAS_JUMP;
1527 return true;
1528 }
1529
trans_BLR(DisasContext * s,arg_r * a)1530 static bool trans_BLR(DisasContext *s, arg_r *a)
1531 {
1532 TCGv_i64 dst = cpu_reg(s, a->rn);
1533 TCGv_i64 lr = cpu_reg(s, 30);
1534 if (dst == lr) {
1535 TCGv_i64 tmp = tcg_temp_new_i64();
1536 tcg_gen_mov_i64(tmp, dst);
1537 dst = tmp;
1538 }
1539 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1540 gen_a64_set_pc(s, dst);
1541 set_btype_for_blr(s);
1542 s->base.is_jmp = DISAS_JUMP;
1543 return true;
1544 }
1545
trans_RET(DisasContext * s,arg_r * a)1546 static bool trans_RET(DisasContext *s, arg_r *a)
1547 {
1548 gen_a64_set_pc(s, cpu_reg(s, a->rn));
1549 s->base.is_jmp = DISAS_JUMP;
1550 return true;
1551 }
1552
auth_branch_target(DisasContext * s,TCGv_i64 dst,TCGv_i64 modifier,bool use_key_a)1553 static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
1554 TCGv_i64 modifier, bool use_key_a)
1555 {
1556 TCGv_i64 truedst;
1557 /*
1558 * Return the branch target for a BRAA/RETA/etc, which is either
1559 * just the destination dst, or that value with the pauth check
1560 * done and the code removed from the high bits.
1561 */
1562 if (!s->pauth_active) {
1563 return dst;
1564 }
1565
1566 truedst = tcg_temp_new_i64();
1567 if (use_key_a) {
1568 gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
1569 } else {
1570 gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
1571 }
1572 return truedst;
1573 }
1574
trans_BRAZ(DisasContext * s,arg_braz * a)1575 static bool trans_BRAZ(DisasContext *s, arg_braz *a)
1576 {
1577 TCGv_i64 dst;
1578
1579 if (!dc_isar_feature(aa64_pauth, s)) {
1580 return false;
1581 }
1582
1583 dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1584 gen_a64_set_pc(s, dst);
1585 set_btype_for_br(s, a->rn);
1586 s->base.is_jmp = DISAS_JUMP;
1587 return true;
1588 }
1589
trans_BLRAZ(DisasContext * s,arg_braz * a)1590 static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
1591 {
1592 TCGv_i64 dst, lr;
1593
1594 if (!dc_isar_feature(aa64_pauth, s)) {
1595 return false;
1596 }
1597
1598 dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1599 lr = cpu_reg(s, 30);
1600 if (dst == lr) {
1601 TCGv_i64 tmp = tcg_temp_new_i64();
1602 tcg_gen_mov_i64(tmp, dst);
1603 dst = tmp;
1604 }
1605 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1606 gen_a64_set_pc(s, dst);
1607 set_btype_for_blr(s);
1608 s->base.is_jmp = DISAS_JUMP;
1609 return true;
1610 }
1611
trans_RETA(DisasContext * s,arg_reta * a)1612 static bool trans_RETA(DisasContext *s, arg_reta *a)
1613 {
1614 TCGv_i64 dst;
1615
1616 dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
1617 gen_a64_set_pc(s, dst);
1618 s->base.is_jmp = DISAS_JUMP;
1619 return true;
1620 }
1621
trans_BRA(DisasContext * s,arg_bra * a)1622 static bool trans_BRA(DisasContext *s, arg_bra *a)
1623 {
1624 TCGv_i64 dst;
1625
1626 if (!dc_isar_feature(aa64_pauth, s)) {
1627 return false;
1628 }
1629 dst = auth_branch_target(s, cpu_reg(s,a->rn), cpu_reg_sp(s, a->rm), !a->m);
1630 gen_a64_set_pc(s, dst);
1631 set_btype_for_br(s, a->rn);
1632 s->base.is_jmp = DISAS_JUMP;
1633 return true;
1634 }
1635
trans_BLRA(DisasContext * s,arg_bra * a)1636 static bool trans_BLRA(DisasContext *s, arg_bra *a)
1637 {
1638 TCGv_i64 dst, lr;
1639
1640 if (!dc_isar_feature(aa64_pauth, s)) {
1641 return false;
1642 }
1643 dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1644 lr = cpu_reg(s, 30);
1645 if (dst == lr) {
1646 TCGv_i64 tmp = tcg_temp_new_i64();
1647 tcg_gen_mov_i64(tmp, dst);
1648 dst = tmp;
1649 }
1650 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1651 gen_a64_set_pc(s, dst);
1652 set_btype_for_blr(s);
1653 s->base.is_jmp = DISAS_JUMP;
1654 return true;
1655 }
1656
trans_ERET(DisasContext * s,arg_ERET * a)1657 static bool trans_ERET(DisasContext *s, arg_ERET *a)
1658 {
1659 TCGv_i64 dst;
1660
1661 if (s->current_el == 0) {
1662 return false;
1663 }
1664 if (s->trap_eret) {
1665 gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
1666 return true;
1667 }
1668 dst = tcg_temp_new_i64();
1669 tcg_gen_ld_i64(dst, tcg_env,
1670 offsetof(CPUARMState, elr_el[s->current_el]));
1671
1672 translator_io_start(&s->base);
1673
1674 gen_helper_exception_return(tcg_env, dst);
1675 /* Must exit loop to check un-masked IRQs */
1676 s->base.is_jmp = DISAS_EXIT;
1677 return true;
1678 }
1679
trans_ERETA(DisasContext * s,arg_reta * a)1680 static bool trans_ERETA(DisasContext *s, arg_reta *a)
1681 {
1682 TCGv_i64 dst;
1683
1684 if (!dc_isar_feature(aa64_pauth, s)) {
1685 return false;
1686 }
1687 if (s->current_el == 0) {
1688 return false;
1689 }
1690 /* The FGT trap takes precedence over an auth trap. */
1691 if (s->trap_eret) {
1692 gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
1693 return true;
1694 }
1695 dst = tcg_temp_new_i64();
1696 tcg_gen_ld_i64(dst, tcg_env,
1697 offsetof(CPUARMState, elr_el[s->current_el]));
1698
1699 dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
1700
1701 translator_io_start(&s->base);
1702
1703 gen_helper_exception_return(tcg_env, dst);
1704 /* Must exit loop to check unmasked IRQs */
1705 s->base.is_jmp = DISAS_EXIT;
1706 return true;
1707 }
1708
1709 static bool trans_NOP(DisasContext *s, arg_NOP *a)
1710 {
1711 return true;
1712 }
1713
1714 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
1715 {
1716 /*
1717 * When running in MTTCG we don't generate jumps to the yield and
1718 * WFE helpers as it won't affect the scheduling of other vCPUs.
1719 * If we wanted to more completely model WFE/SEV so we don't busy
1720 * spin unnecessarily we would need to do something more involved.
1721 */
1722 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1723 s->base.is_jmp = DISAS_YIELD;
1724 }
1725 return true;
1726 }
1727
1728 static bool trans_WFI(DisasContext *s, arg_WFI *a)
1729 {
1730 s->base.is_jmp = DISAS_WFI;
1731 return true;
1732 }
1733
1734 static bool trans_WFE(DisasContext *s, arg_WFI *a)
1735 {
1736 /*
1737 * When running in MTTCG we don't generate jumps to the yield and
1738 * WFE helpers as it won't affect the scheduling of other vCPUs.
1739 * If we wanted to more completely model WFE/SEV so we don't busy
1740 * spin unnecessarily we would need to do something more involved.
1741 */
1742 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1743 s->base.is_jmp = DISAS_WFE;
1744 }
1745 return true;
1746 }
1747
1748 static bool trans_WFIT(DisasContext *s, arg_WFIT *a)
1749 {
1750 if (!dc_isar_feature(aa64_wfxt, s)) {
1751 return false;
1752 }
1753
1754 /*
1755 * Because we need to pass the register value to the helper,
1756 * it's easier to emit the code now, unlike trans_WFI which
1757 * defers it to aarch64_tr_tb_stop(). That means we need to
1758 * check ss_active so that single-stepping a WFIT doesn't halt.
1759 */
1760 if (s->ss_active) {
1761 /* Act like a NOP under architectural singlestep */
1762 return true;
1763 }
1764
1765 gen_a64_update_pc(s, 4);
1766 gen_helper_wfit(tcg_env, cpu_reg(s, a->rd));
1767 /* Go back to the main loop to check for interrupts */
1768 s->base.is_jmp = DISAS_EXIT;
1769 return true;
1770 }
1771
1772 static bool trans_WFET(DisasContext *s, arg_WFET *a)
1773 {
1774 if (!dc_isar_feature(aa64_wfxt, s)) {
1775 return false;
1776 }
1777
1778 /*
1779 * We rely here on our WFE implementation being a NOP, so we
1780 * don't need to do anything different to handle the WFET timeout
1781 * from what trans_WFE does.
1782 */
1783 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1784 s->base.is_jmp = DISAS_WFE;
1785 }
1786 return true;
1787 }
1788
1789 static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
1790 {
1791 if (s->pauth_active) {
1792 gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
1793 }
1794 return true;
1795 }
1796
1797 static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
1798 {
1799 if (s->pauth_active) {
1800 gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
1801 }
1802 return true;
1803 }
1804
1805 static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
1806 {
1807 if (s->pauth_active) {
1808 gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
1809 }
1810 return true;
1811 }
1812
1813 static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
1814 {
1815 if (s->pauth_active) {
1816 gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
1817 }
1818 return true;
1819 }
1820
1821 static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
1822 {
1823 if (s->pauth_active) {
1824 gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
1825 }
1826 return true;
1827 }
1828
1829 static bool trans_ESB(DisasContext *s, arg_ESB *a)
1830 {
1831 /* Without RAS, we must implement this as NOP. */
1832 if (dc_isar_feature(aa64_ras, s)) {
1833 /*
1834 * QEMU does not have a source of physical SErrors,
1835 * so we are only concerned with virtual SErrors.
1836 * The pseudocode in the ARM for this case is
1837 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
1838 * AArch64.vESBOperation();
1839 * Most of the condition can be evaluated at translation time.
1840 * Test for EL2 present, and defer test for SEL2 to runtime.
1841 */
1842 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
1843 gen_helper_vesb(tcg_env);
1844 }
1845 }
1846 return true;
1847 }
1848
1849 static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
1850 {
1851 if (s->pauth_active) {
1852 gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
1853 }
1854 return true;
1855 }
1856
1857 static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
1858 {
1859 if (s->pauth_active) {
1860 gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
1861 }
1862 return true;
1863 }
1864
1865 static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
1866 {
1867 if (s->pauth_active) {
1868 gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
1869 }
1870 return true;
1871 }
1872
1873 static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
1874 {
1875 if (s->pauth_active) {
1876 gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
1877 }
1878 return true;
1879 }
1880
1881 static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
1882 {
1883 if (s->pauth_active) {
1884 gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
1885 }
1886 return true;
1887 }
1888
1889 static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
1890 {
1891 if (s->pauth_active) {
1892 gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
1893 }
1894 return true;
1895 }
1896
1897 static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
1898 {
1899 if (s->pauth_active) {
1900 gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
1901 }
1902 return true;
1903 }
1904
1905 static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
1906 {
1907 if (s->pauth_active) {
1908 gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
1909 }
1910 return true;
1911 }
1912
1913 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
1914 {
1915 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1916 return true;
1917 }
1918
1919 static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
1920 {
1921 /* We handle DSB and DMB the same way */
1922 TCGBar bar;
1923
1924 switch (a->types) {
1925 case 1: /* MBReqTypes_Reads */
1926 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1927 break;
1928 case 2: /* MBReqTypes_Writes */
1929 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1930 break;
1931 default: /* MBReqTypes_All */
1932 bar = TCG_BAR_SC | TCG_MO_ALL;
1933 break;
1934 }
1935 tcg_gen_mb(bar);
1936 return true;
1937 }
1938
1939 static bool trans_ISB(DisasContext *s, arg_ISB *a)
1940 {
1941 /*
1942 * We need to break the TB after this insn to execute
1943 * self-modifying code correctly and also to take
1944 * any pending interrupts immediately.
1945 */
1946 reset_btype(s);
1947 gen_goto_tb(s, 0, 4);
1948 return true;
1949 }
1950
1951 static bool trans_SB(DisasContext *s, arg_SB *a)
1952 {
1953 if (!dc_isar_feature(aa64_sb, s)) {
1954 return false;
1955 }
1956 /*
1957 * TODO: There is no speculation barrier opcode for TCG;
1958 * MB and end the TB instead.
1959 */
1960 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1961 gen_goto_tb(s, 0, 4);
1962 return true;
1963 }
1964
1965 static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
1966 {
1967 if (!dc_isar_feature(aa64_condm_4, s)) {
1968 return false;
1969 }
1970 tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1971 return true;
1972 }
1973
1974 static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
1975 {
1976 TCGv_i32 z;
1977
1978 if (!dc_isar_feature(aa64_condm_5, s)) {
1979 return false;
1980 }
1981
1982 z = tcg_temp_new_i32();
1983
1984 tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
1985
1986 /*
1987 * (!C & !Z) << 31
1988 * (!(C | Z)) << 31
1989 * ~((C | Z) << 31)
1990 * ~-(C | Z)
1991 * (C | Z) - 1
1992 */
1993 tcg_gen_or_i32(cpu_NF, cpu_CF, z);
1994 tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
1995
1996 /* !(Z & C) */
1997 tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
1998 tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
1999
2000 /* (!C & Z) << 31 -> -(Z & ~C) */
2001 tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
2002 tcg_gen_neg_i32(cpu_VF, cpu_VF);
2003
2004 /* C | Z */
2005 tcg_gen_or_i32(cpu_CF, cpu_CF, z);
2006
2007 return true;
2008 }
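
/*
 * Spot-check (ours) of the rewrite chain in the comment inside
 * trans_XAFLAG above, over both values of (C | Z); recall cpu_CF and
 * the local "z" each hold exactly 0 or 1:
 *   C|Z == 0: (C|Z) - 1 == -1, so bit 31 of NF is set   -> N = 1
 *   C|Z == 1: (C|Z) - 1 ==  0, so bit 31 of NF is clear -> N = 0
 * which is exactly (!C & !Z) << 31.
 */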
2009
2010 static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
2011 {
2012 if (!dc_isar_feature(aa64_condm_5, s)) {
2013 return false;
2014 }
2015
2016 tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */
2017 tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */
2018
2019 /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
2020 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
2021
2022 tcg_gen_movi_i32(cpu_NF, 0);
2023 tcg_gen_movi_i32(cpu_VF, 0);
2024
2025 return true;
2026 }
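
/*
 * Worked example (ours) for AXFLAG: an unordered FP compare leaves
 * NZCV = 0011. The ops above rewrite that to N=0, Z=1, C=0, V=0,
 * folding the "unordered" V bit into Z and C.
 */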
2027
2028 static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
2029 {
2030 if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
2031 return false;
2032 }
2033 if (a->imm & 1) {
2034 set_pstate_bits(PSTATE_UAO);
2035 } else {
2036 clear_pstate_bits(PSTATE_UAO);
2037 }
2038 gen_rebuild_hflags(s);
2039 s->base.is_jmp = DISAS_TOO_MANY;
2040 return true;
2041 }
2042
2043 static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
2044 {
2045 if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
2046 return false;
2047 }
2048 if (a->imm & 1) {
2049 set_pstate_bits(PSTATE_PAN);
2050 } else {
2051 clear_pstate_bits(PSTATE_PAN);
2052 }
2053 gen_rebuild_hflags(s);
2054 s->base.is_jmp = DISAS_TOO_MANY;
2055 return true;
2056 }
2057
2058 static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
2059 {
2060 if (s->current_el == 0) {
2061 return false;
2062 }
2063 gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP));
2064 s->base.is_jmp = DISAS_TOO_MANY;
2065 return true;
2066 }
2067
2068 static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
2069 {
2070 if (!dc_isar_feature(aa64_ssbs, s)) {
2071 return false;
2072 }
2073 if (a->imm & 1) {
2074 set_pstate_bits(PSTATE_SSBS);
2075 } else {
2076 clear_pstate_bits(PSTATE_SSBS);
2077 }
2078 /* Don't need to rebuild hflags since SSBS is a nop */
2079 s->base.is_jmp = DISAS_TOO_MANY;
2080 return true;
2081 }
2082
2083 static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
2084 {
2085 if (!dc_isar_feature(aa64_dit, s)) {
2086 return false;
2087 }
2088 if (a->imm & 1) {
2089 set_pstate_bits(PSTATE_DIT);
2090 } else {
2091 clear_pstate_bits(PSTATE_DIT);
2092 }
2093 /* There's no need to rebuild hflags because DIT is a nop */
2094 s->base.is_jmp = DISAS_TOO_MANY;
2095 return true;
2096 }
2097
2098 static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
2099 {
2100 if (dc_isar_feature(aa64_mte, s)) {
2101 /* Full MTE is enabled -- set the TCO bit as directed. */
2102 if (a->imm & 1) {
2103 set_pstate_bits(PSTATE_TCO);
2104 } else {
2105 clear_pstate_bits(PSTATE_TCO);
2106 }
2107 gen_rebuild_hflags(s);
2108 /* Many factors, including TCO, go into MTE_ACTIVE. */
2109 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
2110 return true;
2111 } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
2112 /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
2113 return true;
2114 } else {
2115 /* Insn not present */
2116 return false;
2117 }
2118 }
2119
2120 static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
2121 {
2122 gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm));
2123 s->base.is_jmp = DISAS_TOO_MANY;
2124 return true;
2125 }
2126
2127 static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
2128 {
2129 gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm));
2130 /* Exit the cpu loop to re-evaluate pending IRQs. */
2131 s->base.is_jmp = DISAS_UPDATE_EXIT;
2132 return true;
2133 }
2134
2135 static bool trans_MSR_i_ALLINT(DisasContext *s, arg_i *a)
2136 {
2137 if (!dc_isar_feature(aa64_nmi, s) || s->current_el == 0) {
2138 return false;
2139 }
2140
2141 if (a->imm == 0) {
2142 clear_pstate_bits(PSTATE_ALLINT);
2143 } else if (s->current_el > 1) {
2144 set_pstate_bits(PSTATE_ALLINT);
2145 } else {
2146 gen_helper_msr_set_allint_el1(tcg_env);
2147 }
2148
2149 /* Exit the cpu loop to re-evaluate pending IRQs. */
2150 s->base.is_jmp = DISAS_UPDATE_EXIT;
2151 return true;
2152 }
2153
2154 static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
2155 {
2156 if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
2157 return false;
2158 }
2159 if (sme_access_check(s)) {
2160 int old = s->pstate_sm | (s->pstate_za << 1);
2161 int new = a->imm * 3;
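        /*
         * Note (ours): a->imm is a single bit, so "* 3" replicates it
         * into both the SM (bit 0) and ZA (bit 1) positions; a->mask
         * then selects which of the two PSTATE fields actually change.
         */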
2162
2163 if ((old ^ new) & a->mask) {
2164 /* At least one bit changes. */
2165 gen_helper_set_svcr(tcg_env, tcg_constant_i32(new),
2166 tcg_constant_i32(a->mask));
2167 s->base.is_jmp = DISAS_TOO_MANY;
2168 }
2169 }
2170 return true;
2171 }
2172
2173 static void gen_get_nzcv(TCGv_i64 tcg_rt)
2174 {
2175 TCGv_i32 tmp = tcg_temp_new_i32();
2176 TCGv_i32 nzcv = tcg_temp_new_i32();
2177
2178 /* build bit 31, N */
2179 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
2180 /* build bit 30, Z */
2181 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
2182 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
2183 /* build bit 29, C */
2184 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
2185 /* build bit 28, V */
2186 tcg_gen_shri_i32(tmp, cpu_VF, 31);
2187 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
2188 /* generate result */
2189 tcg_gen_extu_i32_i64(tcg_rt, nzcv);
2190 }
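
/*
 * For reference, the packing built above (and unpacked by gen_set_nzcv()
 * below) matches the architectural NZCV field layout:
 *   bit 31  N  (sign bit of cpu_NF)
 *   bit 30  Z  (set when cpu_ZF == 0)
 *   bit 29  C  (cpu_CF holds 0 or 1 directly)
 *   bit 28  V  (sign bit of cpu_VF)
 * e.g. a compare that found its operands equal (Z=1, C=1) reads back
 * as 0x60000000.
 */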
2191
2192 static void gen_set_nzcv(TCGv_i64 tcg_rt)
2193 {
2194 TCGv_i32 nzcv = tcg_temp_new_i32();
2195
2196 /* take NZCV from R[t] */
2197 tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
2198
2199 /* bit 31, N */
2200 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
2201 /* bit 30, Z */
2202 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
2203 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
2204 /* bit 29, C */
2205 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
2206 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
2207 /* bit 28, V */
2208 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
2209 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
2210 }
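
/*
 * Purely illustrative, host-side sketch of the round trip the two
 * functions above emit as TCG ops. The "example_" helpers are ours,
 * are not used by the translator, and exist only to make the bit
 * positions concrete (bool/uint32_t come in via qemu/osdep.h).
 */
static inline uint32_t example_pack_nzcv(bool n, bool z, bool c, bool v)
{
    return ((uint32_t)n << 31) | ((uint32_t)z << 30) |
           ((uint32_t)c << 29) | ((uint32_t)v << 28);
}

static inline void example_unpack_nzcv(uint32_t nzcv, bool *n, bool *z,
                                       bool *c, bool *v)
{
    *n = nzcv & (1U << 31);   /* bit 31: N */
    *z = nzcv & (1U << 30);   /* bit 30: Z */
    *c = nzcv & (1U << 29);   /* bit 29: C */
    *v = nzcv & (1U << 28);   /* bit 28: V */
}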
2211
2212 static void gen_sysreg_undef(DisasContext *s, bool isread,
2213 uint8_t op0, uint8_t op1, uint8_t op2,
2214 uint8_t crn, uint8_t crm, uint8_t rt)
2215 {
2216 /*
2217 * Generate code to emit an UNDEF with correct syndrome
2218 * information for a failed system register access.
2219 * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
2220 * but if FEAT_IDST is implemented then read accesses to registers
2221 * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
2222 * syndrome.
2223 */
2224 uint32_t syndrome;
2225
2226 if (isread && dc_isar_feature(aa64_ids, s) &&
2227 arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
2228 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2229 } else {
2230 syndrome = syn_uncategorized();
2231 }
2232 gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
2233 }
2234
2235 /* MRS - move from system register
2236 * MSR (register) - move to system register
2237 * SYS
2238 * SYSL
2239 * These are all essentially the same insn in 'read' and 'write'
2240 * versions, with varying op0 fields.
2241 */
2242 static void handle_sys(DisasContext *s, bool isread,
2243 unsigned int op0, unsigned int op1, unsigned int op2,
2244 unsigned int crn, unsigned int crm, unsigned int rt)
2245 {
2246 uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
2247 crn, crm, op0, op1, op2);
2248 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2249 bool need_exit_tb = false;
2250 bool nv_trap_to_el2 = false;
2251 bool nv_redirect_reg = false;
2252 bool skip_fp_access_checks = false;
2253 bool nv2_mem_redirect = false;
2254 TCGv_ptr tcg_ri = NULL;
2255 TCGv_i64 tcg_rt;
2256 uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2257
2258 if (crn == 11 || crn == 15) {
2259 /*
2260 * Check for TIDCP trap, which must take precedence over
2261 * the UNDEF for "no such register" etc.
2262 */
2263 switch (s->current_el) {
2264 case 0:
2265 if (dc_isar_feature(aa64_tidcp1, s)) {
2266 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
2267 }
2268 break;
2269 case 1:
2270 gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
2271 break;
2272 }
2273 }
2274
2275 if (!ri) {
2276 /* Unknown register; this might be a guest error or a QEMU
2277 * unimplemented feature.
2278 */
2279 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
2280 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
2281 isread ? "read" : "write", op0, op1, crn, crm, op2);
2282 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2283 return;
2284 }
2285
2286 if (s->nv2 && ri->nv2_redirect_offset) {
2287 /*
2288 * Some registers always redirect to memory; some only do so if
2289 * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in
2290 * pairs which share an offset; see the table in R_CSRPQ).
2291 */
2292 if (ri->nv2_redirect_offset & NV2_REDIR_NV1) {
2293 nv2_mem_redirect = s->nv1;
2294 } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) {
2295 nv2_mem_redirect = !s->nv1;
2296 } else {
2297 nv2_mem_redirect = true;
2298 }
2299 }
2300
2301 /* Check access permissions */
2302 if (!cp_access_ok(s->current_el, ri, isread)) {
2303 /*
2304 * FEAT_NV/NV2 handling does not do the usual FP access checks
2305 * for registers only accessible at EL2 (though it *does* do them
2306 * for registers accessible at EL1).
2307 */
2308 skip_fp_access_checks = true;
2309 if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) {
2310 /*
2311 * This is one of the few EL2 registers which should redirect
2312 * to the equivalent EL1 register. We do that after running
2313 * the EL2 register's accessfn.
2314 */
2315 nv_redirect_reg = true;
2316 assert(!nv2_mem_redirect);
2317 } else if (nv2_mem_redirect) {
2318 /*
2319 * NV2 redirect-to-memory takes precedence over trap to EL2 or
2320 * UNDEF to EL1.
2321 */
2322 } else if (s->nv && arm_cpreg_traps_in_nv(ri)) {
2323 /*
2324 * This register / instruction exists and is an EL2 register, so
2325 * we must trap to EL2 if accessed in nested virtualization EL1
2326 * instead of UNDEFing. We'll do that after the usual access checks.
2327 * (This makes a difference only for a couple of registers like
2328 * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority
2329 * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have
2330 * an accessfn which does nothing when called from EL1, because
2331 * the trap-to-EL3 controls which would apply to that register
2332 * at EL2 don't take priority over the FEAT_NV trap-to-EL2.)
2333 */
2334 nv_trap_to_el2 = true;
2335 } else {
2336 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2337 return;
2338 }
2339 }
2340
2341 if (ri->accessfn || (ri->fgt && s->fgt_active)) {
2342 /* Emit code to perform further access permissions checks at
2343 * runtime; this may result in an exception.
2344 */
2345 gen_a64_update_pc(s, 0);
2346 tcg_ri = tcg_temp_new_ptr();
2347 gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
2348 tcg_constant_i32(key),
2349 tcg_constant_i32(syndrome),
2350 tcg_constant_i32(isread));
2351 } else if (ri->type & ARM_CP_RAISES_EXC) {
2352 /*
2353 * The readfn or writefn might raise an exception;
2354 * synchronize the CPU state in case it does.
2355 */
2356 gen_a64_update_pc(s, 0);
2357 }
2358
2359 if (!skip_fp_access_checks) {
2360 if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
2361 return;
2362 } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
2363 return;
2364 } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
2365 return;
2366 }
2367 }
2368
2369 if (nv_trap_to_el2) {
2370 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2371 return;
2372 }
2373
2374 if (nv_redirect_reg) {
2375 /*
2376 * FEAT_NV2 redirection of an EL2 register to an EL1 register.
2377 * Conveniently in all cases the encoding of the EL1 register is
2378 * identical to the EL2 register except that opc1 is 0.
2379 * Get the reginfo for the EL1 register to use for the actual access.
2380 * We don't use the EL1 register's access function, and
2381 * fine-grained-traps on EL1 also do not apply here.
2382 */
2383 key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
2384 crn, crm, op0, 0, op2);
2385 ri = get_arm_cp_reginfo(s->cp_regs, key);
2386 assert(ri);
2387 assert(cp_access_ok(s->current_el, ri, isread));
2388 /*
2389 * We might not have done an update_pc earlier, so check we don't
2390 * need it. We could support this in future if necessary.
2391 */
2392 assert(!(ri->type & ARM_CP_RAISES_EXC));
2393 }
2394
2395 if (nv2_mem_redirect) {
2396 /*
2397 * This system register is being redirected into an EL2 memory access.
2398 * This means it is not an IO operation, doesn't change hflags,
2399 * and need not end the TB, because it has no side effects.
2400 *
2401 * The access is 64-bit single copy atomic, guaranteed aligned because
2402 * of the definition of VCNR_EL2. Its endianness depends on
2403 * SCTLR_EL2.EE, not on the data endianness of EL1.
2404 * It is done under either the EL2 translation regime or the EL2&0
2405 * translation regime, depending on HCR_EL2.E2H. It behaves as if
2406 * PSTATE.PAN is 0.
2407 */
2408 TCGv_i64 ptr = tcg_temp_new_i64();
2409 MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN;
2410 ARMMMUIdx armmemidx = s->nv2_mem_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
2411 int memidx = arm_to_core_mmu_idx(armmemidx);
2412 uint32_t syn;
2413
2414 mop |= (s->nv2_mem_be ? MO_BE : MO_LE);
2415
2416 tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2));
2417 tcg_gen_addi_i64(ptr, ptr,
2418 (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK));
2419 tcg_rt = cpu_reg(s, rt);
2420
2421 syn = syn_data_abort_vncr(0, !isread, 0);
2422 disas_set_insn_syndrome(s, syn);
2423 if (isread) {
2424 tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop);
2425 } else {
2426 tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop);
2427 }
2428 return;
2429 }
2430
2431 /* Handle special cases first */
2432 switch (ri->type & ARM_CP_SPECIAL_MASK) {
2433 case 0:
2434 break;
2435 case ARM_CP_NOP:
2436 return;
2437 case ARM_CP_NZCV:
2438 tcg_rt = cpu_reg(s, rt);
2439 if (isread) {
2440 gen_get_nzcv(tcg_rt);
2441 } else {
2442 gen_set_nzcv(tcg_rt);
2443 }
2444 return;
2445 case ARM_CP_CURRENTEL:
2446 {
2447 /*
2448 * Reads as current EL value from pstate, which is
2449 * guaranteed to be constant by the tb flags.
2450 * For nested virt we should report EL2.
2451 */
2452 int el = s->nv ? 2 : s->current_el;
2453 tcg_rt = cpu_reg(s, rt);
2454 tcg_gen_movi_i64(tcg_rt, el << 2);
2455 return;
2456 }
2457 case ARM_CP_DC_ZVA:
2458 /* Writes clear the aligned block of memory which rt points into. */
2459 if (s->mte_active[0]) {
2460 int desc = 0;
2461
2462 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
2463 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
2464 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
2465
2466 tcg_rt = tcg_temp_new_i64();
2467 gen_helper_mte_check_zva(tcg_rt, tcg_env,
2468 tcg_constant_i32(desc), cpu_reg(s, rt));
2469 } else {
2470 tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
2471 }
2472 gen_helper_dc_zva(tcg_env, tcg_rt);
2473 return;
2474 case ARM_CP_DC_GVA:
2475 {
2476 TCGv_i64 clean_addr, tag;
2477
2478 /*
2479 * DC_GVA, like DC_ZVA, requires that we supply the original
2480 * pointer for an invalid page. Probe that address first.
2481 */
2482 tcg_rt = cpu_reg(s, rt);
2483 clean_addr = clean_data_tbi(s, tcg_rt);
2484 gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
2485
2486 if (s->ata[0]) {
2487 /* Extract the tag from the register to match STZGM. */
2488 tag = tcg_temp_new_i64();
2489 tcg_gen_shri_i64(tag, tcg_rt, 56);
2490 gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
2491 }
2492 }
2493 return;
2494 case ARM_CP_DC_GZVA:
2495 {
2496 TCGv_i64 clean_addr, tag;
2497
2498 /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
2499 tcg_rt = cpu_reg(s, rt);
2500 clean_addr = clean_data_tbi(s, tcg_rt);
2501 gen_helper_dc_zva(tcg_env, clean_addr);
2502
2503 if (s->ata[0]) {
2504 /* Extract the tag from the register to match STZGM. */
2505 tag = tcg_temp_new_i64();
2506 tcg_gen_shri_i64(tag, tcg_rt, 56);
2507 gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
2508 }
2509 }
2510 return;
2511 default:
2512 g_assert_not_reached();
2513 }
2514
2515 if (ri->type & ARM_CP_IO) {
2516 /* I/O operations must end the TB here (whether read or write) */
2517 need_exit_tb = translator_io_start(&s->base);
2518 }
2519
2520 tcg_rt = cpu_reg(s, rt);
2521
2522 if (isread) {
2523 if (ri->type & ARM_CP_CONST) {
2524 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
2525 } else if (ri->readfn) {
2526 if (!tcg_ri) {
2527 tcg_ri = gen_lookup_cp_reg(key);
2528 }
2529 gen_helper_get_cp_reg64(tcg_rt, tcg_env, tcg_ri);
2530 } else {
2531 tcg_gen_ld_i64(tcg_rt, tcg_env, ri->fieldoffset);
2532 }
2533 } else {
2534 if (ri->type & ARM_CP_CONST) {
2535 /* If not forbidden by access permissions, treat as WI */
2536 return;
2537 } else if (ri->writefn) {
2538 if (!tcg_ri) {
2539 tcg_ri = gen_lookup_cp_reg(key);
2540 }
2541 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tcg_rt);
2542 } else {
2543 tcg_gen_st_i64(tcg_rt, tcg_env, ri->fieldoffset);
2544 }
2545 }
2546
2547 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
2548 /*
2549 * A write to any coprocessor register that ends a TB
2550 * must rebuild the hflags for the next TB.
2551 */
2552 gen_rebuild_hflags(s);
2553 /*
2554 * We default to ending the TB on a coprocessor register write,
2555 * but allow this to be suppressed by the register definition
2556 * (usually only necessary to work around guest bugs).
2557 */
2558 need_exit_tb = true;
2559 }
2560 if (need_exit_tb) {
2561 s->base.is_jmp = DISAS_UPDATE_EXIT;
2562 }
2563 }
2564
2565 static bool trans_SYS(DisasContext *s, arg_SYS *a)
2566 {
2567 handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
2568 return true;
2569 }
2570
2571 static bool trans_SVC(DisasContext *s, arg_i *a)
2572 {
2573 /*
2574 * For SVC, HVC and SMC we advance the single-step state
2575 * machine before taking the exception. This is architecturally
2576 * mandated, to ensure that single-stepping a system call
2577 * instruction works properly.
2578 */
2579 uint32_t syndrome = syn_aa64_svc(a->imm);
2580 if (s->fgt_svc) {
2581 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2582 return true;
2583 }
2584 gen_ss_advance(s);
2585 gen_exception_insn(s, 4, EXCP_SWI, syndrome);
2586 return true;
2587 }
2588
2589 static bool trans_HVC(DisasContext *s, arg_i *a)
2590 {
2591 int target_el = s->current_el == 3 ? 3 : 2;
2592
2593 if (s->current_el == 0) {
2594 unallocated_encoding(s);
2595 return true;
2596 }
2597 /*
2598 * The pre HVC helper handles cases when HVC gets trapped
2599 * as an undefined insn by runtime configuration.
2600 */
2601 gen_a64_update_pc(s, 0);
2602 gen_helper_pre_hvc(tcg_env);
2603 /* Architecture requires ss advance before we do the actual work */
2604 gen_ss_advance(s);
2605 gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
2606 return true;
2607 }
2608
2609 static bool trans_SMC(DisasContext *s, arg_i *a)
2610 {
2611 if (s->current_el == 0) {
2612 unallocated_encoding(s);
2613 return true;
2614 }
2615 gen_a64_update_pc(s, 0);
2616 gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
2617 /* Architecture requires ss advance before we do the actual work */
2618 gen_ss_advance(s);
2619 gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
2620 return true;
2621 }
2622
2623 static bool trans_BRK(DisasContext *s, arg_i *a)
2624 {
2625 gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
2626 return true;
2627 }
2628
2629 static bool trans_HLT(DisasContext *s, arg_i *a)
2630 {
2631 /*
2632 * HLT. This has two purposes.
2633 * Architecturally, it is an external halting debug instruction.
2634 * Since QEMU doesn't implement external debug, we treat this as
2635 * the architecture requires when halting debug is disabled: it will UNDEF.
2636 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
2637 */
2638 if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
2639 gen_exception_internal_insn(s, EXCP_SEMIHOST);
2640 } else {
2641 unallocated_encoding(s);
2642 }
2643 return true;
2644 }
2645
2646 /*
2647 * Load/Store exclusive instructions are implemented by remembering
2648 * the value/address loaded, and seeing if these are the same
2649 * when the store is performed. This is not actually the architecturally
2650 * mandated semantics, but it works for typical guest code sequences
2651 * and avoids having to monitor regular stores.
2652 *
2653 * The store exclusive uses the atomic cmpxchg primitives to avoid
2654 * races in multi-threaded linux-user and when MTTCG softmmu is
2655 * enabled.
2656 */
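
/*
 * Illustration (ours): for the typical guest sequence
 *     retry:  ldxr  x0, [x2]
 *             add   x0, x0, #1
 *             stxr  w1, x0, [x2]
 *             cbnz  w1, retry
 * the scheme above degenerates to a cmpxchg of the remembered value,
 * which is exactly the semantics the atomic increment needs.
 */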
2657 static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
2658 int size, bool is_pair)
2659 {
2660 int idx = get_mem_index(s);
2661 TCGv_i64 dirty_addr, clean_addr;
2662 MemOp memop = check_atomic_align(s, rn, size + is_pair);
2663
2664 s->is_ldex = true;
2665 dirty_addr = cpu_reg_sp(s, rn);
2666 clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);
2667
2668 g_assert(size <= 3);
2669 if (is_pair) {
2670 g_assert(size >= 2);
2671 if (size == 2) {
2672 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2673 if (s->be_data == MO_LE) {
2674 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2675 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2676 } else {
2677 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2678 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2679 }
2680 } else {
2681 TCGv_i128 t16 = tcg_temp_new_i128();
2682
2683 tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);
2684
2685 if (s->be_data == MO_LE) {
2686 tcg_gen_extr_i128_i64(cpu_exclusive_val,
2687 cpu_exclusive_high, t16);
2688 } else {
2689 tcg_gen_extr_i128_i64(cpu_exclusive_high,
2690 cpu_exclusive_val, t16);
2691 }
2692 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2693 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2694 }
2695 } else {
2696 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2697 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2698 }
2699 tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
2700 }
2701
2702 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2703 int rn, int size, int is_pair)
2704 {
2705 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2706 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
2707 * [addr] = {Rt};
2708 * if (is_pair) {
2709 * [addr + datasize] = {Rt2};
2710 * }
2711 * {Rd} = 0;
2712 * } else {
2713 * {Rd} = 1;
2714 * }
2715 * env->exclusive_addr = -1;
2716 */
2717 TCGLabel *fail_label = gen_new_label();
2718 TCGLabel *done_label = gen_new_label();
2719 TCGv_i64 tmp, clean_addr;
2720 MemOp memop;
2721
2722 /*
2723 * FIXME: We are out of spec here. We have recorded only the address
2724 * from load_exclusive, not the entire range, and we assume that the
2725 * size of the access on both sides match. The architecture allows the
2726 * store to be smaller than the load, so long as the stored bytes are
2727 * within the range recorded by the load.
2728 */
2729
2730 /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */
2731 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2732 tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);
2733
2734 /*
2735 * The write, and any associated faults, only happen if the virtual
2736 * and physical addresses pass the exclusive monitor check. These
2737 * faults are exceedingly unlikely, because normally the guest uses
2738 * the exact same address register for the load_exclusive, and we
2739 * would have recognized these faults there.
2740 *
2741 * It is possible to trigger an alignment fault pre-LSE2, e.g. with an
2742 * unaligned 4-byte write within the range of an aligned 8-byte load.
2743 * With LSE2, the store would need to cross a 16-byte boundary when the
2744 * load did not, which would mean the store is outside the range
2745 * recorded for the monitor, which would have failed a corrected monitor
2746 * check above. For now, we assume no size change and retain the
2747 * MO_ALIGN to let tcg know what we checked in the load_exclusive.
2748 *
2749 * It is possible to trigger an MTE fault, by performing the load with
2750 * a virtual address with a valid tag and performing the store with the
2751 * same virtual address and a different invalid tag.
2752 */
2753 memop = size + is_pair;
2754 if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) {
2755 memop |= MO_ALIGN;
2756 }
2757 memop = finalize_memop(s, memop);
2758 gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2759
2760 tmp = tcg_temp_new_i64();
2761 if (is_pair) {
2762 if (size == 2) {
2763 if (s->be_data == MO_LE) {
2764 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2765 } else {
2766 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2767 }
2768 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2769 cpu_exclusive_val, tmp,
2770 get_mem_index(s), memop);
2771 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2772 } else {
2773 TCGv_i128 t16 = tcg_temp_new_i128();
2774 TCGv_i128 c16 = tcg_temp_new_i128();
2775 TCGv_i64 a, b;
2776
2777 if (s->be_data == MO_LE) {
2778 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
2779 tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
2780 cpu_exclusive_high);
2781 } else {
2782 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
2783 tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
2784 cpu_exclusive_val);
2785 }
2786
2787 tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
2788 get_mem_index(s), memop);
2789
2790 a = tcg_temp_new_i64();
2791 b = tcg_temp_new_i64();
2792 if (s->be_data == MO_LE) {
2793 tcg_gen_extr_i128_i64(a, b, t16);
2794 } else {
2795 tcg_gen_extr_i128_i64(b, a, t16);
2796 }
2797
2798 tcg_gen_xor_i64(a, a, cpu_exclusive_val);
2799 tcg_gen_xor_i64(b, b, cpu_exclusive_high);
2800 tcg_gen_or_i64(tmp, a, b);
2801
2802 tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
2803 }
2804 } else {
2805 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2806 cpu_reg(s, rt), get_mem_index(s), memop);
2807 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2808 }
2809 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2810 tcg_gen_br(done_label);
2811
2812 gen_set_label(fail_label);
2813 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2814 gen_set_label(done_label);
2815 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2816 }
2817
2818 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2819 int rn, int size)
2820 {
2821 TCGv_i64 tcg_rs = cpu_reg(s, rs);
2822 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2823 int memidx = get_mem_index(s);
2824 TCGv_i64 clean_addr;
2825 MemOp memop;
2826
2827 if (rn == 31) {
2828 gen_check_sp_alignment(s);
2829 }
2830 memop = check_atomic_align(s, rn, size);
2831 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2832 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
2833 memidx, memop);
2834 }
2835
2836 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2837 int rn, int size)
2838 {
2839 TCGv_i64 s1 = cpu_reg(s, rs);
2840 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2841 TCGv_i64 t1 = cpu_reg(s, rt);
2842 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2843 TCGv_i64 clean_addr;
2844 int memidx = get_mem_index(s);
2845 MemOp memop;
2846
2847 if (rn == 31) {
2848 gen_check_sp_alignment(s);
2849 }
2850
2851 /* This is a single atomic access, despite the "pair". */
2852 memop = check_atomic_align(s, rn, size + 1);
2853 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2854
2855 if (size == 2) {
2856 TCGv_i64 cmp = tcg_temp_new_i64();
2857 TCGv_i64 val = tcg_temp_new_i64();
2858
2859 if (s->be_data == MO_LE) {
2860 tcg_gen_concat32_i64(val, t1, t2);
2861 tcg_gen_concat32_i64(cmp, s1, s2);
2862 } else {
2863 tcg_gen_concat32_i64(val, t2, t1);
2864 tcg_gen_concat32_i64(cmp, s2, s1);
2865 }
2866
2867 tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);
2868
2869 if (s->be_data == MO_LE) {
2870 tcg_gen_extr32_i64(s1, s2, cmp);
2871 } else {
2872 tcg_gen_extr32_i64(s2, s1, cmp);
2873 }
2874 } else {
2875 TCGv_i128 cmp = tcg_temp_new_i128();
2876 TCGv_i128 val = tcg_temp_new_i128();
2877
2878 if (s->be_data == MO_LE) {
2879 tcg_gen_concat_i64_i128(val, t1, t2);
2880 tcg_gen_concat_i64_i128(cmp, s1, s2);
2881 } else {
2882 tcg_gen_concat_i64_i128(val, t2, t1);
2883 tcg_gen_concat_i64_i128(cmp, s2, s1);
2884 }
2885
2886 tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);
2887
2888 if (s->be_data == MO_LE) {
2889 tcg_gen_extr_i128_i64(s1, s2, cmp);
2890 } else {
2891 tcg_gen_extr_i128_i64(s2, s1, cmp);
2892 }
2893 }
2894 }
2895
2896 /*
2897 * Compute the ISS.SF bit for syndrome information if an exception
2898 * is taken on a load or store. This indicates whether the instruction
2899 * is accessing a 32-bit or 64-bit register. This logic is derived
2900 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2901 */
2902 static bool ldst_iss_sf(int size, bool sign, bool ext)
2903 {
2905 if (sign) {
2906 /*
2907 * Signed loads are 64 bit results if we are not going to
2908 * do a zero-extend from 32 to 64 after the load.
2909 * (For a store, sign and ext are always false.)
2910 */
2911 return !ext;
2912 } else {
2913 /* Unsigned loads/stores work at the specified size */
2914 return size == MO_64;
2915 }
2916 }
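
/*
 * Worked examples (ours) of the rule above:
 *   LDRSB w1, [x0]   size=MO_8,  sign=1, ext=1 -> SF = 0 (32-bit dest)
 *   LDRSB x1, [x0]   size=MO_8,  sign=1, ext=0 -> SF = 1
 *   LDR   w1, [x0]   size=MO_32, sign=0        -> SF = 0
 *   LDR   x1, [x0]   size=MO_64, sign=0        -> SF = 1
 */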
2917
2918 static bool trans_STXR(DisasContext *s, arg_stxr *a)
2919 {
2920 if (a->rn == 31) {
2921 gen_check_sp_alignment(s);
2922 }
2923 if (a->lasr) {
2924 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2925 }
2926 gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
2927 return true;
2928 }
2929
2930 static bool trans_LDXR(DisasContext *s, arg_stxr *a)
2931 {
2932 if (a->rn == 31) {
2933 gen_check_sp_alignment(s);
2934 }
2935 gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
2936 if (a->lasr) {
2937 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2938 }
2939 return true;
2940 }
2941
2942 static bool trans_STLR(DisasContext *s, arg_stlr *a)
2943 {
2944 TCGv_i64 clean_addr;
2945 MemOp memop;
2946 bool iss_sf = ldst_iss_sf(a->sz, false, false);
2947
2948 /*
2949 * StoreLORelease is the same as Store-Release for QEMU, but
2950 * needs the feature-test.
2951 */
2952 if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
2953 return false;
2954 }
2955 /* Generate ISS for non-exclusive accesses including LASR. */
2956 if (a->rn == 31) {
2957 gen_check_sp_alignment(s);
2958 }
2959 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2960 memop = check_ordered_align(s, a->rn, 0, true, a->sz);
2961 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
2962 true, a->rn != 31, memop);
2963 do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
2964 iss_sf, a->lasr);
2965 return true;
2966 }
2967
2968 static bool trans_LDAR(DisasContext *s, arg_stlr *a)
2969 {
2970 TCGv_i64 clean_addr;
2971 MemOp memop;
2972 bool iss_sf = ldst_iss_sf(a->sz, false, false);
2973
2974 /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
2975 if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
2976 return false;
2977 }
2978 /* Generate ISS for non-exclusive accesses including LASR. */
2979 if (a->rn == 31) {
2980 gen_check_sp_alignment(s);
2981 }
2982 memop = check_ordered_align(s, a->rn, 0, false, a->sz);
2983 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
2984 false, a->rn != 31, memop);
2985 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
2986 a->rt, iss_sf, a->lasr);
2987 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2988 return true;
2989 }
2990
2991 static bool trans_STXP(DisasContext *s, arg_stxr *a)
2992 {
2993 if (a->rn == 31) {
2994 gen_check_sp_alignment(s);
2995 }
2996 if (a->lasr) {
2997 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2998 }
2999 gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
3000 return true;
3001 }
3002
3003 static bool trans_LDXP(DisasContext *s, arg_stxr *a)
3004 {
3005 if (a->rn == 31) {
3006 gen_check_sp_alignment(s);
3007 }
3008 gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
3009 if (a->lasr) {
3010 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3011 }
3012 return true;
3013 }
3014
3015 static bool trans_CASP(DisasContext *s, arg_CASP *a)
3016 {
3017 if (!dc_isar_feature(aa64_atomics, s)) {
3018 return false;
3019 }
3020 if (((a->rt | a->rs) & 1) != 0) {
3021 return false;
3022 }
3023
3024 gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
3025 return true;
3026 }
3027
3028 static bool trans_CAS(DisasContext *s, arg_CAS *a)
3029 {
3030 if (!dc_isar_feature(aa64_atomics, s)) {
3031 return false;
3032 }
3033 gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
3034 return true;
3035 }
3036
3037 static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
3038 {
3039 bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
3040 TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
3041 TCGv_i64 clean_addr = tcg_temp_new_i64();
3042 MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3043
3044 gen_pc_plus_diff(s, clean_addr, a->imm);
3045 do_gpr_ld(s, tcg_rt, clean_addr, memop,
3046 false, true, a->rt, iss_sf, false);
3047 return true;
3048 }
3049
3050 static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
3051 {
3052 /* Load register (literal), vector version */
3053 TCGv_i64 clean_addr;
3054 MemOp memop;
3055
3056 if (!fp_access_check(s)) {
3057 return true;
3058 }
3059 memop = finalize_memop_asimd(s, a->sz);
3060 clean_addr = tcg_temp_new_i64();
3061 gen_pc_plus_diff(s, clean_addr, a->imm);
3062 do_fp_ld(s, a->rt, clean_addr, memop);
3063 return true;
3064 }
3065
3066 static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
3067 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
3068 uint64_t offset, bool is_store, MemOp mop)
3069 {
3070 if (a->rn == 31) {
3071 gen_check_sp_alignment(s);
3072 }
3073
3074 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3075 if (!a->p) {
3076 tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
3077 }
3078
3079 *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
3080 (a->w || a->rn != 31), 2 << a->sz, mop);
3081 }
3082
3083 static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
3084 TCGv_i64 dirty_addr, uint64_t offset)
3085 {
3086 if (a->w) {
3087 if (a->p) {
3088 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3089 }
3090 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
3091 }
3092 }
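
/*
 * Taken together, the pre/post helpers above implement the three pair
 * addressing forms; in decode terms (our summary):
 *   STP x0, x1, [x2, #16]    a->p = 0, a->w = 0  (signed offset)
 *   STP x0, x1, [x2, #16]!   a->p = 0, a->w = 1  (pre-index)
 *   STP x0, x1, [x2], #16    a->p = 1, a->w = 1  (post-index)
 */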
3093
3094 static bool trans_STP(DisasContext *s, arg_ldstpair *a)
3095 {
3096 uint64_t offset = a->imm << a->sz;
3097 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
3098 MemOp mop = finalize_memop(s, a->sz);
3099
3100 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
3101 tcg_rt = cpu_reg(s, a->rt);
3102 tcg_rt2 = cpu_reg(s, a->rt2);
3103 /*
3104 * We built mop above for the single logical access -- rebuild it
3105 * now for the paired operation.
3106 *
3107 * With LSE2, non-sign-extending pairs are treated atomically if
3108 * aligned, and if unaligned one of the pair will be completely
3109 * within a 16-byte block and that element will be atomic.
3110 * Otherwise each element is separately atomic.
3111 * In all cases, issue one operation with the correct atomicity.
3112 */
3113 mop = a->sz + 1;
3114 if (s->align_mem) {
3115 mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
3116 }
3117 mop = finalize_memop_pair(s, mop);
3118 if (a->sz == 2) {
3119 TCGv_i64 tmp = tcg_temp_new_i64();
3120
3121 if (s->be_data == MO_LE) {
3122 tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
3123 } else {
3124 tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
3125 }
3126 tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
3127 } else {
3128 TCGv_i128 tmp = tcg_temp_new_i128();
3129
3130 if (s->be_data == MO_LE) {
3131 tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
3132 } else {
3133 tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
3134 }
3135 tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
3136 }
3137 op_addr_ldstpair_post(s, a, dirty_addr, offset);
3138 return true;
3139 }
3140
3141 static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
3142 {
3143 uint64_t offset = a->imm << a->sz;
3144 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
3145 MemOp mop = finalize_memop(s, a->sz);
3146
3147 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
3148 tcg_rt = cpu_reg(s, a->rt);
3149 tcg_rt2 = cpu_reg(s, a->rt2);
3150
3151 /*
3152 * We built mop above for the single logical access -- rebuild it
3153 * now for the paired operation.
3154 *
3155 * With LSE2, non-sign-extending pairs are treated atomically if
3156 * aligned, and if unaligned one of the pair will be completely
3157 * within a 16-byte block and that element will be atomic.
3158 * Otherwise each element is separately atomic.
3159 * In all cases, issue one operation with the correct atomicity.
3160 *
3161 * This treats sign-extending loads like zero-extending loads,
3162 * since that reuses the most code below.
3163 */
3164 mop = a->sz + 1;
3165 if (s->align_mem) {
3166 mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
3167 }
3168 mop = finalize_memop_pair(s, mop);
3169 if (a->sz == 2) {
3170 int o2 = s->be_data == MO_LE ? 32 : 0;
3171 int o1 = o2 ^ 32;
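        /*
         * Note (ours): o1/o2 pick which 32-bit half of the loaded
         * doubleword feeds rt and rt2; the XOR swaps the halves for
         * big-endian data.
         */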
3172
3173 tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
3174 if (a->sign) {
3175 tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
3176 tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
3177 } else {
3178 tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
3179 tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
3180 }
3181 } else {
3182 TCGv_i128 tmp = tcg_temp_new_i128();
3183
3184 tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
3185 if (s->be_data == MO_LE) {
3186 tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
3187 } else {
3188 tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
3189 }
3190 }
3191 op_addr_ldstpair_post(s, a, dirty_addr, offset);
3192 return true;
3193 }
3194
3195 static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
3196 {
3197 uint64_t offset = a->imm << a->sz;
3198 TCGv_i64 clean_addr, dirty_addr;
3199 MemOp mop;
3200
3201 if (!fp_access_check(s)) {
3202 return true;
3203 }
3204
3205 /* LSE2 does not merge FP pairs; leave these as separate operations. */
3206 mop = finalize_memop_asimd(s, a->sz);
3207 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
3208 do_fp_st(s, a->rt, clean_addr, mop);
3209 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
3210 do_fp_st(s, a->rt2, clean_addr, mop);
3211 op_addr_ldstpair_post(s, a, dirty_addr, offset);
3212 return true;
3213 }
3214
3215 static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
3216 {
3217 uint64_t offset = a->imm << a->sz;
3218 TCGv_i64 clean_addr, dirty_addr;
3219 MemOp mop;
3220
3221 if (!fp_access_check(s)) {
3222 return true;
3223 }
3224
3225 /* LSE2 does not merge FP pairs; leave these as separate operations. */
3226 mop = finalize_memop_asimd(s, a->sz);
3227 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
3228 do_fp_ld(s, a->rt, clean_addr, mop);
3229 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
3230 do_fp_ld(s, a->rt2, clean_addr, mop);
3231 op_addr_ldstpair_post(s, a, dirty_addr, offset);
3232 return true;
3233 }
3234
3235 static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
3236 {
3237 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
3238 uint64_t offset = a->imm << LOG2_TAG_GRANULE;
3239 MemOp mop;
3240 TCGv_i128 tmp;
3241
3242 /* STGP only comes in one size. */
3243 tcg_debug_assert(a->sz == MO_64);
3244
3245 if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
3246 return false;
3247 }
3248
3249 if (a->rn == 31) {
3250 gen_check_sp_alignment(s);
3251 }
3252
3253 dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3254 if (!a->p) {
3255 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3256 }
3257
3258 clean_addr = clean_data_tbi(s, dirty_addr);
3259 tcg_rt = cpu_reg(s, a->rt);
3260 tcg_rt2 = cpu_reg(s, a->rt2);
3261
3262 /*
3263 * STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE,
3264 * and one tag operation. We implement it as one single aligned 16-byte
3265 * memory operation for convenience. Note that the alignment ensures
3266 * MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store.
3267 */
3268 mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);
3269
3270 tmp = tcg_temp_new_i128();
3271 if (s->be_data == MO_LE) {
3272 tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
3273 } else {
3274 tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
3275 }
3276 tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
3277
3278 /* Perform the tag store, if tag access enabled. */
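    /*
     * Note (ours): both helper operands are dirty_addr because the
     * allocation tag to store is taken from the logical tag bits
     * [59:56] of the address register itself.
     */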
3279 if (s->ata[0]) {
3280 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3281 gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr);
3282 } else {
3283 gen_helper_stg(tcg_env, dirty_addr, dirty_addr);
3284 }
3285 }
3286
3287 op_addr_ldstpair_post(s, a, dirty_addr, offset);
3288 return true;
3289 }
3290
3291 static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
3292 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
3293 uint64_t offset, bool is_store, MemOp mop)
3294 {
3295 int memidx;
3296
3297 if (a->rn == 31) {
3298 gen_check_sp_alignment(s);
3299 }
3300
3301 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3302 if (!a->p) {
3303 tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
3304 }
3305 memidx = get_a64_user_mem_index(s, a->unpriv);
3306 *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
3307 a->w || a->rn != 31,
3308 mop, a->unpriv, memidx);
3309 }
3310
3311 static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
3312 TCGv_i64 dirty_addr, uint64_t offset)
3313 {
3314 if (a->w) {
3315 if (a->p) {
3316 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3317 }
3318 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
3319 }
3320 }
3321
3322 static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
3323 {
3324 bool iss_sf, iss_valid = !a->w;
3325 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3326 int memidx = get_a64_user_mem_index(s, a->unpriv);
3327 MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3328
3329 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
3330
3331 tcg_rt = cpu_reg(s, a->rt);
3332 iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3333
3334 do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
3335 iss_valid, a->rt, iss_sf, false);
3336 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3337 return true;
3338 }
3339
3340 static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
3341 {
3342 bool iss_sf, iss_valid = !a->w;
3343 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3344 int memidx = get_a64_user_mem_index(s, a->unpriv);
3345 MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3346
3347 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
3348
3349 tcg_rt = cpu_reg(s, a->rt);
3350 iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3351
3352 do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
3353 a->ext, memidx, iss_valid, a->rt, iss_sf, false);
3354 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3355 return true;
3356 }
3357
3358 static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
3359 {
3360 TCGv_i64 clean_addr, dirty_addr;
3361 MemOp mop;
3362
3363 if (!fp_access_check(s)) {
3364 return true;
3365 }
3366 mop = finalize_memop_asimd(s, a->sz);
3367 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
3368 do_fp_st(s, a->rt, clean_addr, mop);
3369 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3370 return true;
3371 }
3372
3373 static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
3374 {
3375 TCGv_i64 clean_addr, dirty_addr;
3376 MemOp mop;
3377
3378 if (!fp_access_check(s)) {
3379 return true;
3380 }
3381 mop = finalize_memop_asimd(s, a->sz);
3382 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
3383 do_fp_ld(s, a->rt, clean_addr, mop);
3384 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3385 return true;
3386 }
3387
3388 static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
3389 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
3390 bool is_store, MemOp memop)
3391 {
3392 TCGv_i64 tcg_rm;
3393
3394 if (a->rn == 31) {
3395 gen_check_sp_alignment(s);
3396 }
3397 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3398
3399 tcg_rm = read_cpu_reg(s, a->rm, 1);
3400 ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);
3401
3402 tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
3403 *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
3404 }
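
/*
 * Example (ours) of the register-offset form handled above: for
 *     ldr x0, [x1, w2, sxtw #3]
 * a->opt selects SXTW and a->s is set, so ext_and_shift_reg()
 * sign-extends w2 and shifts it left by a->sz (3 for the 64-bit
 * access) before it is added to the base register.
 */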
3405
3406 static bool trans_LDR(DisasContext *s, arg_ldst *a)
3407 {
3408 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3409 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3410 MemOp memop;
3411
3412 if (extract32(a->opt, 1, 1) == 0) {
3413 return false;
3414 }
3415
3416 memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3417 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
3418 tcg_rt = cpu_reg(s, a->rt);
3419 do_gpr_ld(s, tcg_rt, clean_addr, memop,
3420 a->ext, true, a->rt, iss_sf, false);
3421 return true;
3422 }
3423
3424 static bool trans_STR(DisasContext *s, arg_ldst *a)
3425 {
3426 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3427 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3428 MemOp memop;
3429
3430 if (extract32(a->opt, 1, 1) == 0) {
3431 return false;
3432 }
3433
3434 memop = finalize_memop(s, a->sz);
3435 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
3436 tcg_rt = cpu_reg(s, a->rt);
3437 do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
3438 return true;
3439 }
3440
3441 static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
3442 {
3443 TCGv_i64 clean_addr, dirty_addr;
3444 MemOp memop;
3445
3446 if (extract32(a->opt, 1, 1) == 0) {
3447 return false;
3448 }
3449
3450 if (!fp_access_check(s)) {
3451 return true;
3452 }
3453
3454 memop = finalize_memop_asimd(s, a->sz);
3455 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
3456 do_fp_ld(s, a->rt, clean_addr, memop);
3457 return true;
3458 }
3459
3460 static bool trans_STR_v(DisasContext *s, arg_ldst *a)
3461 {
3462 TCGv_i64 clean_addr, dirty_addr;
3463 MemOp memop;
3464
3465 if (extract32(a->opt, 1, 1) == 0) {
3466 return false;
3467 }
3468
3469 if (!fp_access_check(s)) {
3470 return true;
3471 }
3472
3473 memop = finalize_memop_asimd(s, a->sz);
3474 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
3475 do_fp_st(s, a->rt, clean_addr, memop);
3476 return true;
3477 }
3478
3479
3480 static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
3481 int sign, bool invert)
3482 {
3483 MemOp mop = a->sz | sign;
3484 TCGv_i64 clean_addr, tcg_rs, tcg_rt;
3485
3486 if (a->rn == 31) {
3487 gen_check_sp_alignment(s);
3488 }
3489 mop = check_atomic_align(s, a->rn, mop);
3490 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
3491 a->rn != 31, mop);
3492 tcg_rs = read_cpu_reg(s, a->rs, true);
3493 tcg_rt = cpu_reg(s, a->rt);
3494 if (invert) {
3495 tcg_gen_not_i64(tcg_rs, tcg_rs);
3496 }
3497 /*
3498 * The tcg atomic primitives are all full barriers. Therefore we
3499 * can ignore the Acquire and Release bits of this instruction.
3500 */
3501 fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
3502
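    /*
     * The signed min/max primitives return a sign-extended result in
     * the host register, but the architected value written back to Rt
     * is zero-extended to 64 bits; re-narrow it here.
     */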
3503 if (mop & MO_SIGN) {
3504 switch (a->sz) {
3505 case MO_8:
3506 tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
3507 break;
3508 case MO_16:
3509 tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
3510 break;
3511 case MO_32:
3512 tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
3513 break;
3514 case MO_64:
3515 break;
3516 default:
3517 g_assert_not_reached();
3518 }
3519 }
3520 return true;
3521 }
3522
3523 TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
3524 TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
3525 TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
3526 TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
3527 TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
3528 TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
3529 TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
3530 TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
3531 TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
3532
3533 static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
3534 {
3535 bool iss_sf = ldst_iss_sf(a->sz, false, false);
3536 TCGv_i64 clean_addr;
3537 MemOp mop;
3538
3539 if (!dc_isar_feature(aa64_atomics, s) ||
3540 !dc_isar_feature(aa64_rcpc_8_3, s)) {
3541 return false;
3542 }
3543 if (a->rn == 31) {
3544 gen_check_sp_alignment(s);
3545 }
3546 mop = check_atomic_align(s, a->rn, a->sz);
3547 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
3548 a->rn != 31, mop);
3549 /*
3550 * LDAPR* are a special case because they are a simple load, not a
3551 * fetch-and-do-something op.
3552 * The architectural consistency requirements here are weaker than
3553 * full load-acquire (we only need "load-acquire processor consistent"),
3554 * but we choose to implement them as full LDAQ.
3555 */
3556 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
3557 true, a->rt, iss_sf, true);
3558 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3559 return true;
3560 }
3561
3562 static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
3563 {
3564 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3565 MemOp memop;
3566
3567 /* Load with pointer authentication */
3568 if (!dc_isar_feature(aa64_pauth, s)) {
3569 return false;
3570 }
3571
3572 if (a->rn == 31) {
3573 gen_check_sp_alignment(s);
3574 }
3575 dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3576
3577 if (s->pauth_active) {
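        /* The M bit selects the key: AUTDA for LDRAA, AUTDB for LDRAB. */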
3578 if (!a->m) {
3579 gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr,
3580 tcg_constant_i64(0));
3581 } else {
3582 gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr,
3583 tcg_constant_i64(0));
3584 }
3585 }
3586
3587 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3588
3589 memop = finalize_memop(s, MO_64);
3590
3591 /* Note that "clean" and "dirty" here refer to TBI not PAC. */
3592 clean_addr = gen_mte_check1(s, dirty_addr, false,
3593 a->w || a->rn != 31, memop);
3594
3595 tcg_rt = cpu_reg(s, a->rt);
3596 do_gpr_ld(s, tcg_rt, clean_addr, memop,
3597 /* extend */ false, /* iss_valid */ !a->w,
3598 /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);
3599
3600 if (a->w) {
3601 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
3602 }
3603 return true;
3604 }
3605
3606 static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
3607 {
3608 TCGv_i64 clean_addr, dirty_addr;
3609 MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
3610 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3611
3612 if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3613 return false;
3614 }
3615
3616 if (a->rn == 31) {
3617 gen_check_sp_alignment(s);
3618 }
3619
3620 mop = check_ordered_align(s, a->rn, a->imm, false, mop);
3621 dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3622 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3623 clean_addr = clean_data_tbi(s, dirty_addr);
3624
3625 /*
3626 * Load-AcquirePC semantics; we implement as the slightly more
3627 * restrictive Load-Acquire.
3628 */
3629 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
3630 a->rt, iss_sf, true);
3631 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3632 return true;
3633 }
3634
3635 static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
3636 {
3637 TCGv_i64 clean_addr, dirty_addr;
3638 MemOp mop = a->sz;
3639 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3640
3641 if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3642 return false;
3643 }
3644
3645 /* TODO: ARMv8.4-LSE SCTLR.nAA */
3646
3647 if (a->rn == 31) {
3648 gen_check_sp_alignment(s);
3649 }
3650
3651 mop = check_ordered_align(s, a->rn, a->imm, true, mop);
3652 dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3653 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3654 clean_addr = clean_data_tbi(s, dirty_addr);
3655
3656 /* Store-Release semantics */
3657 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3658 do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
3659 return true;
3660 }
3661
3662 static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
3663 {
3664 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3665 MemOp endian, align, mop;
3666
3667 int total; /* total bytes */
3668 int elements; /* elements per vector */
3669 int r;
3670 int size = a->sz;
3671
3672 if (!a->p && a->rm != 0) {
3673 /* For non-postindexed accesses the Rm field must be 0 */
3674 return false;
3675 }
3676 if (size == 3 && !a->q && a->selem != 1) {
3677 return false;
3678 }
3679 if (!fp_access_check(s)) {
3680 return true;
3681 }
3682
3683 if (a->rn == 31) {
3684 gen_check_sp_alignment(s);
3685 }
3686
3687 /* For our purposes, bytes are always little-endian. */
3688 endian = s->be_data;
3689 if (size == 0) {
3690 endian = MO_LE;
3691 }
3692
3693 total = a->rpt * a->selem * (a->q ? 16 : 8);
3694 tcg_rn = cpu_reg_sp(s, a->rn);
3695
3696 /*
3697 * Issue the MTE check vs the logical repeat count, before we
3698 * promote consecutive little-endian elements below.
3699 */
3700 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
3701 finalize_memop_asimd(s, size));
3702
3703 /*
3704 * Consecutive little-endian elements from a single register
3705 * can be promoted to a larger little-endian operation.
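 * For example, a 16-byte LD1 of a single register is performed as
 * two little-endian 8-byte loads.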
3706 */
3707 align = MO_ALIGN;
3708 if (a->selem == 1 && endian == MO_LE) {
3709 align = pow2_align(size);
3710 size = 3;
3711 }
3712 if (!s->align_mem) {
3713 align = 0;
3714 }
3715 mop = endian | size | align;
3716
3717 elements = (a->q ? 16 : 8) >> size;
3718 tcg_ebytes = tcg_constant_i64(1 << size);
3719 for (r = 0; r < a->rpt; r++) {
3720 int e;
3721 for (e = 0; e < elements; e++) {
3722 int xs;
3723 for (xs = 0; xs < a->selem; xs++) {
3724 int tt = (a->rt + r + xs) % 32;
3725 do_vec_ld(s, tt, e, clean_addr, mop);
3726 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3727 }
3728 }
3729 }
3730
3731 /*
3732 * For non-quad operations, setting a slice of the low 64 bits of
3733 * the register clears the high 64 bits (in the ARM ARM pseudocode
3734 * this is implicit in the fact that 'rval' is a 64 bit wide
3735 * variable). For quad operations, we might still need to zero
3736 * the high bits of SVE.
3737 */
3738 for (r = 0; r < a->rpt * a->selem; r++) {
3739 int tt = (a->rt + r) % 32;
3740 clear_vec_high(s, a->q, tt);
3741 }
3742
3743 if (a->p) {
3744 if (a->rm == 31) {
3745 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3746 } else {
3747 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3748 }
3749 }
3750 return true;
3751 }
3752
3753 static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
3754 {
3755 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3756 MemOp endian, align, mop;
3757
3758 int total; /* total bytes */
3759 int elements; /* elements per vector */
3760 int r;
3761 int size = a->sz;
3762
3763 if (!a->p && a->rm != 0) {
3764 /* For non-postindexed accesses the Rm field must be 0 */
3765 return false;
3766 }
3767 if (size == 3 && !a->q && a->selem != 1) {
3768 return false;
3769 }
3770 if (!fp_access_check(s)) {
3771 return true;
3772 }
3773
3774 if (a->rn == 31) {
3775 gen_check_sp_alignment(s);
3776 }
3777
3778 /* For our purposes, bytes are always little-endian. */
3779 endian = s->be_data;
3780 if (size == 0) {
3781 endian = MO_LE;
3782 }
3783
3784 total = a->rpt * a->selem * (a->q ? 16 : 8);
3785 tcg_rn = cpu_reg_sp(s, a->rn);
3786
3787 /*
3788 * Issue the MTE check vs the logical repeat count, before we
3789 * promote consecutive little-endian elements below.
3790 */
3791 clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
3792 finalize_memop_asimd(s, size));
3793
3794 /*
3795 * Consecutive little-endian elements from a single register
3796 * can be promoted to a larger little-endian operation.
3797 */
3798 align = MO_ALIGN;
3799 if (a->selem == 1 && endian == MO_LE) {
3800 align = pow2_align(size);
3801 size = 3;
3802 }
3803 if (!s->align_mem) {
3804 align = 0;
3805 }
3806 mop = endian | size | align;
3807
3808 elements = (a->q ? 16 : 8) >> size;
3809 tcg_ebytes = tcg_constant_i64(1 << size);
3810 for (r = 0; r < a->rpt; r++) {
3811 int e;
3812 for (e = 0; e < elements; e++) {
3813 int xs;
3814 for (xs = 0; xs < a->selem; xs++) {
3815 int tt = (a->rt + r + xs) % 32;
3816 do_vec_st(s, tt, e, clean_addr, mop);
3817 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3818 }
3819 }
3820 }
3821
3822 if (a->p) {
3823 if (a->rm == 31) {
3824 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3825 } else {
3826 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3827 }
3828 }
3829 return true;
3830 }
3831
3832 static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
3833 {
3834 int xs, total, rt;
3835 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3836 MemOp mop;
3837
3838 if (!a->p && a->rm != 0) {
3839 return false;
3840 }
3841 if (!fp_access_check(s)) {
3842 return true;
3843 }
3844
3845 if (a->rn == 31) {
3846 gen_check_sp_alignment(s);
3847 }
3848
3849 total = a->selem << a->scale;
3850 tcg_rn = cpu_reg_sp(s, a->rn);
3851
3852 mop = finalize_memop_asimd(s, a->scale);
3853 clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
3854 total, mop);
3855
3856 tcg_ebytes = tcg_constant_i64(1 << a->scale);
3857 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3858 do_vec_st(s, rt, a->index, clean_addr, mop);
3859 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3860 }
3861
3862 if (a->p) {
3863 if (a->rm == 31) {
3864 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3865 } else {
3866 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3867 }
3868 }
3869 return true;
3870 }
3871
3872 static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
3873 {
3874 int xs, total, rt;
3875 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3876 MemOp mop;
3877
3878 if (!a->p && a->rm != 0) {
3879 return false;
3880 }
3881 if (!fp_access_check(s)) {
3882 return true;
3883 }
3884
3885 if (a->rn == 31) {
3886 gen_check_sp_alignment(s);
3887 }
3888
3889 total = a->selem << a->scale;
3890 tcg_rn = cpu_reg_sp(s, a->rn);
3891
3892 mop = finalize_memop_asimd(s, a->scale);
3893 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
3894 total, mop);
3895
3896 tcg_ebytes = tcg_constant_i64(1 << a->scale);
3897 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3898 do_vec_ld(s, rt, a->index, clean_addr, mop);
3899 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3900 }
3901
3902 if (a->p) {
3903 if (a->rm == 31) {
3904 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3905 } else {
3906 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3907 }
3908 }
3909 return true;
3910 }
3911
3912 static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
3913 {
3914 int xs, total, rt;
3915 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3916 MemOp mop;
3917
3918 if (!a->p && a->rm != 0) {
3919 return false;
3920 }
3921 if (!fp_access_check(s)) {
3922 return true;
3923 }
3924
3925 if (a->rn == 31) {
3926 gen_check_sp_alignment(s);
3927 }
3928
3929 total = a->selem << a->scale;
3930 tcg_rn = cpu_reg_sp(s, a->rn);
3931
3932 mop = finalize_memop_asimd(s, a->scale);
3933 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
3934 total, mop);
3935
3936 tcg_ebytes = tcg_constant_i64(1 << a->scale);
3937 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3938 /* Load and replicate to all elements */
3939 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3940
3941 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
3942 tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
3943 (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
3944 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3945 }
3946
3947 if (a->p) {
3948 if (a->rm == 31) {
3949 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3950 } else {
3951 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3952 }
3953 }
3954 return true;
3955 }
3956
3957 static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
3958 {
3959 TCGv_i64 addr, clean_addr, tcg_rt;
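    /* The DC ZVA block size is (4 << DCZID_EL0.BS) bytes. */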
3960 int size = 4 << s->dcz_blocksize;
3961
3962 if (!dc_isar_feature(aa64_mte, s)) {
3963 return false;
3964 }
3965 if (s->current_el == 0) {
3966 return false;
3967 }
3968
3969 if (a->rn == 31) {
3970 gen_check_sp_alignment(s);
3971 }
3972
3973 addr = read_cpu_reg_sp(s, a->rn, true);
3974 tcg_gen_addi_i64(addr, addr, a->imm);
3975 tcg_rt = cpu_reg(s, a->rt);
3976
3977 if (s->ata[0]) {
3978 gen_helper_stzgm_tags(tcg_env, addr, tcg_rt);
3979 }
3980 /*
3981 * The non-tags portion of STZGM is mostly like DC_ZVA,
3982 * except the alignment happens before the access.
3983 */
3984 clean_addr = clean_data_tbi(s, addr);
3985 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
3986 gen_helper_dc_zva(tcg_env, clean_addr);
3987 return true;
3988 }
3989
3990 static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
3991 {
3992 TCGv_i64 addr, clean_addr, tcg_rt;
3993
3994 if (!dc_isar_feature(aa64_mte, s)) {
3995 return false;
3996 }
3997 if (s->current_el == 0) {
3998 return false;
3999 }
4000
4001 if (a->rn == 31) {
4002 gen_check_sp_alignment(s);
4003 }
4004
4005 addr = read_cpu_reg_sp(s, a->rn, true);
4006 tcg_gen_addi_i64(addr, addr, a->imm);
4007 tcg_rt = cpu_reg(s, a->rt);
4008
4009 if (s->ata[0]) {
4010 gen_helper_stgm(tcg_env, addr, tcg_rt);
4011 } else {
4012 MMUAccessType acc = MMU_DATA_STORE;
4013 int size = 4 << s->gm_blocksize;
4014
4015 clean_addr = clean_data_tbi(s, addr);
4016 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4017 gen_probe_access(s, clean_addr, acc, size);
4018 }
4019 return true;
4020 }
4021
4022 static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
4023 {
4024 TCGv_i64 addr, clean_addr, tcg_rt;
4025
4026 if (!dc_isar_feature(aa64_mte, s)) {
4027 return false;
4028 }
4029 if (s->current_el == 0) {
4030 return false;
4031 }
4032
4033 if (a->rn == 31) {
4034 gen_check_sp_alignment(s);
4035 }
4036
4037 addr = read_cpu_reg_sp(s, a->rn, true);
4038 tcg_gen_addi_i64(addr, addr, a->imm);
4039 tcg_rt = cpu_reg(s, a->rt);
4040
4041 if (s->ata[0]) {
4042 gen_helper_ldgm(tcg_rt, tcg_env, addr);
4043 } else {
4044 MMUAccessType acc = MMU_DATA_LOAD;
4045 int size = 4 << s->gm_blocksize;
4046
4047 clean_addr = clean_data_tbi(s, addr);
4048 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4049 gen_probe_access(s, clean_addr, acc, size);
4050 /* The result tags are zeros. */
4051 tcg_gen_movi_i64(tcg_rt, 0);
4052 }
4053 return true;
4054 }
4055
4056 static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
4057 {
4058 TCGv_i64 addr, clean_addr, tcg_rt;
4059
4060 if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
4061 return false;
4062 }
4063
4064 if (a->rn == 31) {
4065 gen_check_sp_alignment(s);
4066 }
4067
4068 addr = read_cpu_reg_sp(s, a->rn, true);
4069 if (!a->p) {
4070 /* pre-index or signed offset */
4071 tcg_gen_addi_i64(addr, addr, a->imm);
4072 }
4073
4074 tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
4075 tcg_rt = cpu_reg(s, a->rt);
4076 if (s->ata[0]) {
4077 gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt);
4078 } else {
4079 /*
4080 * Tag access disabled: we must check for aborts on the load
4081 * from [rn+offset], and then insert a 0 tag into rt.
4082 */
4083 clean_addr = clean_data_tbi(s, addr);
4084 gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
4085 gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
4086 }
4087
4088 if (a->w) {
4089 /* pre-index or post-index */
4090 if (a->p) {
4091 /* post-index */
4092 tcg_gen_addi_i64(addr, addr, a->imm);
4093 }
4094 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
4095 }
4096 return true;
4097 }
4098
4099 static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
4100 {
4101 TCGv_i64 addr, tcg_rt;
4102
4103 if (a->rn == 31) {
4104 gen_check_sp_alignment(s);
4105 }
4106
4107 addr = read_cpu_reg_sp(s, a->rn, true);
4108 if (!a->p) {
4109 /* pre-index or signed offset */
4110 tcg_gen_addi_i64(addr, addr, a->imm);
4111 }
4112 tcg_rt = cpu_reg_sp(s, a->rt);
4113 if (!s->ata[0]) {
4114 /*
4115 * For STG and ST2G, we need to check alignment and probe memory.
4116 * TODO: For STZG and STZ2G, we could rely on the stores below,
4117 * at least for system mode; user-only won't enforce alignment.
4118 */
4119 if (is_pair) {
4120 gen_helper_st2g_stub(tcg_env, addr);
4121 } else {
4122 gen_helper_stg_stub(tcg_env, addr);
4123 }
4124 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
4125 if (is_pair) {
4126 gen_helper_st2g_parallel(tcg_env, addr, tcg_rt);
4127 } else {
4128 gen_helper_stg_parallel(tcg_env, addr, tcg_rt);
4129 }
4130 } else {
4131 if (is_pair) {
4132 gen_helper_st2g(tcg_env, addr, tcg_rt);
4133 } else {
4134 gen_helper_stg(tcg_env, addr, tcg_rt);
4135 }
4136 }
4137
4138 if (is_zero) {
4139 TCGv_i64 clean_addr = clean_data_tbi(s, addr);
4140 TCGv_i64 zero64 = tcg_constant_i64(0);
4141 TCGv_i128 zero128 = tcg_temp_new_i128();
4142 int mem_index = get_mem_index(s);
4143 MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);
4144
4145 tcg_gen_concat_i64_i128(zero128, zero64, zero64);
4146
4147 /* This is 1 or 2 atomic 16-byte operations. */
4148 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
4149 if (is_pair) {
4150 tcg_gen_addi_i64(clean_addr, clean_addr, 16);
4151 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
4152 }
4153 }
4154
4155 if (a->w) {
4156 /* pre-index or post-index */
4157 if (a->p) {
4158 /* post-index */
4159 tcg_gen_addi_i64(addr, addr, a->imm);
4160 }
4161 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
4162 }
4163 return true;
4164 }
4165
4166 TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
4167 TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
4168 TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
4169 TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
4170
4171 typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);
4172
4173 static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
4174 bool is_setg, SetFn fn)
4175 {
4176 int memidx;
4177 uint32_t syndrome, desc = 0;
4178
4179 if (is_setg && !dc_isar_feature(aa64_mte, s)) {
4180 return false;
4181 }
4182
4183 /*
4184 * UNPREDICTABLE cases: we choose to UNDEF, which allows
4185 * us to pull this check before the CheckMOPSEnabled() test
4186 * (which we do in the helper function)
4187 */
4188 if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
4189 a->rd == 31 || a->rn == 31) {
4190 return false;
4191 }
4192
4193 memidx = get_a64_user_mem_index(s, a->unpriv);
4194
4195 /*
4196 * We pass option_a == true, matching our implementation;
4197 * we pass wrong_option == false: helper function may set that bit.
4198 */
4199 syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
4200 is_epilogue, false, true, a->rd, a->rs, a->rn);
4201
4202 if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
4203 /* We may need to do MTE tag checking, so assemble the descriptor */
4204 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4205 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4206 desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
4207 /* SIZEM1 and ALIGN we leave 0 (byte write) */
4208 }
4209 /* The helper function always needs the memidx even with MTE disabled */
4210 desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx);
4211
4212 /*
4213 * The helper needs the register numbers, but since they're in
4214 * the syndrome anyway, we let it extract them from there rather
4215 * than passing in an extra three integer arguments.
4216 */
4217 fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
4218 return true;
4219 }
4220
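/*
 * SETP, SETM and SETE are the prologue, main and epilogue steps of a
 * FEAT_MOPS memory set; the SETG* forms additionally write MTE
 * allocation tags.
 */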
4221 TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
4222 TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
4223 TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
4224 TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
4225 TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
4226 TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)
4227
4228 typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32);
4229
4230 static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
4231 {
4232 int rmemidx, wmemidx;
4233 uint32_t syndrome, rdesc = 0, wdesc = 0;
4234 bool wunpriv = extract32(a->options, 0, 1);
4235 bool runpriv = extract32(a->options, 1, 1);
4236
4237 /*
4238 * UNPREDICTABLE cases: we choose to UNDEF, which allows
4239 * us to pull this check before the CheckMOPSEnabled() test
4240 * (which we do in the helper function)
4241 */
4242 if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
4243 a->rd == 31 || a->rs == 31 || a->rn == 31) {
4244 return false;
4245 }
4246
4247 rmemidx = get_a64_user_mem_index(s, runpriv);
4248 wmemidx = get_a64_user_mem_index(s, wunpriv);
4249
4250 /*
4251 * We pass option_a == true, matching our implementation;
4252 * we pass wrong_option == false: helper function may set that bit.
4253 */
4254 syndrome = syn_mop(false, false, a->options, is_epilogue,
4255 false, true, a->rd, a->rs, a->rn);
4256
4257 /* If we need to do MTE tag checking, assemble the descriptors */
4258 if (s->mte_active[runpriv]) {
4259 rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
4260 rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
4261 }
4262 if (s->mte_active[wunpriv]) {
4263 wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
4264 wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
4265 wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
4266 }
4267 /* The helper function needs these parts of the descriptor regardless */
4268 rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
4269 wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx);
4270
4271 /*
4272 * The helper needs the register numbers, but since they're in
4273 * the syndrome anyway, we let it extract them from there rather
4274 * than passing in an extra three integer arguments.
4275 */
4276 fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
4277 tcg_constant_i32(rdesc));
4278 return true;
4279 }
4280
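/*
 * CPYP, CPYM and CPYE are the prologue, main and epilogue steps of a
 * FEAT_MOPS memory copy; the CPYF* forms always copy forwards, while
 * the plain CPY* forms may copy in either direction.
 */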
4281 TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp)
4282 TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym)
4283 TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye)
4284 TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp)
4285 TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm)
4286 TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe)
4287
4288 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
4289
4290 static bool gen_rri(DisasContext *s, arg_rri_sf *a,
4291 bool rd_sp, bool rn_sp, ArithTwoOp *fn)
4292 {
4293 TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
4294 TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
4295 TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);
4296
4297 fn(tcg_rd, tcg_rn, tcg_imm);
4298 if (!a->sf) {
4299 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4300 }
4301 return true;
4302 }
4303
4304 /*
4305 * PC-rel. addressing
4306 */
4307
4308 static bool trans_ADR(DisasContext *s, arg_ri *a)
4309 {
4310 gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
4311 return true;
4312 }
4313
4314 static bool trans_ADRP(DisasContext *s, arg_ri *a)
4315 {
4316 int64_t offset = (int64_t)a->imm << 12;
4317
4318 /* The page offset is ok for CF_PCREL. */
4319 offset -= s->pc_curr & 0xfff;
4320 gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
4321 return true;
4322 }
4323
4324 /*
4325 * Add/subtract (immediate)
4326 */
4327 TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
4328 TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
4329 TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
4330 TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
4331
4332 /*
4333 * Add/subtract (immediate, with tags)
4334 */
4335
4336 static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
4337 bool sub_op)
4338 {
4339 TCGv_i64 tcg_rn, tcg_rd;
4340 int imm;
4341
4342 imm = a->uimm6 << LOG2_TAG_GRANULE;
4343 if (sub_op) {
4344 imm = -imm;
4345 }
4346
4347 tcg_rn = cpu_reg_sp(s, a->rn);
4348 tcg_rd = cpu_reg_sp(s, a->rd);
4349
4350 if (s->ata[0]) {
4351 gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn,
4352 tcg_constant_i32(imm),
4353 tcg_constant_i32(a->uimm4));
4354 } else {
4355 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4356 gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4357 }
4358 return true;
4359 }
4360
4361 TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
4362 TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
4363
4364 /* The input should be a value in the bottom e bits (with higher
4365 * bits zero); returns that value replicated into every element
4366 * of size e in a 64 bit integer.
4367 */
4368 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
4369 {
4370 assert(e != 0);
4371 while (e < 64) {
4372 mask |= mask << e;
4373 e *= 2;
4374 }
4375 return mask;
4376 }
4377
4378 /*
4379 * Logical (immediate)
4380 */
4381
4382 /*
4383 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
4384 * only require the wmask. Returns false if the imms/immr/immn are a reserved
4385 * value (ie should cause a guest UNDEF exception), and true if they are
4386 * valid, in which case the decoded bit pattern is written to result.
4387 */
4388 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
4389 unsigned int imms, unsigned int immr)
4390 {
4391 uint64_t mask;
4392 unsigned e, levels, s, r;
4393 int len;
4394
4395 assert(immn < 2 && imms < 64 && immr < 64);
4396
4397 /* The bit patterns we create here are 64 bit patterns which
4398 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
4399 * 64 bits each. Each element contains the same value: a run
4400 * of between 1 and e-1 non-zero bits, rotated within the
4401 * element by between 0 and e-1 bits.
4402 *
4403 * The element size and run length are encoded into immn (1 bit)
4404 * and imms (6 bits) as follows:
4405 * 64 bit elements: immn = 1, imms = <length of run - 1>
4406 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
4407 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
4408 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
4409 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
4410 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
4411 * Notice that immn = 0, imms = 11111x is the only combination
4412 * not covered by one of the above options; this is reserved.
4413 * Further, <length of run - 1> all-ones is a reserved pattern.
4414 *
4415 * In all cases the rotation is by immr % e (and immr is 6 bits).
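 *
 * For example immn = 0, imms = 110001, immr = 000001 selects 8 bit
 * elements containing a run of 2 set bits rotated right by 1, so
 * each byte is 0x81 and the resulting wmask is 0x8181818181818181.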
4416 */
4417
4418 /* First determine the element size */
4419 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
4420 if (len < 1) {
4421 /* This is the immn == 0, imms == 11111x case */
4422 return false;
4423 }
4424 e = 1 << len;
4425
4426 levels = e - 1;
4427 s = imms & levels;
4428 r = immr & levels;
4429
4430 if (s == levels) {
4431 /* <length of run - 1> mustn't be all-ones. */
4432 return false;
4433 }
4434
4435 /* Create the value of one element: s+1 set bits rotated
4436 * by r within the element (which is e bits wide)...
4437 */
4438 mask = MAKE_64BIT_MASK(0, s + 1);
4439 if (r) {
4440 mask = (mask >> r) | (mask << (e - r));
4441 mask &= MAKE_64BIT_MASK(0, e);
4442 }
4443 /* ...then replicate the element over the whole 64 bit value */
4444 mask = bitfield_replicate(mask, e);
4445 *result = mask;
4446 return true;
4447 }
4448
4449 static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
4450 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
4451 {
4452 TCGv_i64 tcg_rd, tcg_rn;
4453 uint64_t imm;
4454
4455 /* Some immediate field values are reserved. */
4456 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
4457 extract32(a->dbm, 0, 6),
4458 extract32(a->dbm, 6, 6))) {
4459 return false;
4460 }
4461 if (!a->sf) {
4462 imm &= 0xffffffffull;
4463 }
4464
4465 tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
4466 tcg_rn = cpu_reg(s, a->rn);
4467
4468 fn(tcg_rd, tcg_rn, imm);
4469 if (set_cc) {
4470 gen_logic_CC(a->sf, tcg_rd);
4471 }
4472 if (!a->sf) {
4473 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4474 }
4475 return true;
4476 }
4477
4478 TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
4479 TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
4480 TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
4481 TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
4482
4483 /*
4484 * Move wide (immediate)
4485 */
4486
4487 static bool trans_MOVZ(DisasContext *s, arg_movw *a)
4488 {
4489 int pos = a->hw << 4;
4490 tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
4491 return true;
4492 }
4493
4494 static bool trans_MOVN(DisasContext *s, arg_movw *a)
4495 {
4496 int pos = a->hw << 4;
4497 uint64_t imm = a->imm;
4498
4499 imm = ~(imm << pos);
4500 if (!a->sf) {
4501 imm = (uint32_t)imm;
4502 }
4503 tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
4504 return true;
4505 }
4506
4507 static bool trans_MOVK(DisasContext *s, arg_movw *a)
4508 {
4509 int pos = a->hw << 4;
4510 TCGv_i64 tcg_rd, tcg_im;
4511
4512 tcg_rd = cpu_reg(s, a->rd);
4513 tcg_im = tcg_constant_i64(a->imm);
4514 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
4515 if (!a->sf) {
4516 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4517 }
4518 return true;
4519 }
4520
4521 /*
4522 * Bitfield
4523 */
4524
4525 static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
4526 {
4527 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4528 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4529 unsigned int bitsize = a->sf ? 64 : 32;
4530 unsigned int ri = a->immr;
4531 unsigned int si = a->imms;
4532 unsigned int pos, len;
4533
4534 if (si >= ri) {
4535 /* Wd<s-r:0> = Wn<s:r> */
4536 len = (si - ri) + 1;
4537 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4538 if (!a->sf) {
4539 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4540 }
4541 } else {
4542 /* Wd<32+s-r,32-r> = Wn<s:0> */
4543 len = si + 1;
4544 pos = (bitsize - ri) & (bitsize - 1);
4545
4546 if (len < ri) {
4547 /*
4548 * Sign extend the destination field from len to fill the
4549 * balance of the word. Let the deposit below insert all
4550 * of those sign bits.
4551 */
4552 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4553 len = ri;
4554 }
4555
4556 /*
4557 * We start with zero, and we haven't modified any bits outside
4558 * bitsize, therefore no final zero-extension is needed for !sf.
4559 */
4560 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4561 }
4562 return true;
4563 }
4564
4565 static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
4566 {
4567 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4568 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4569 unsigned int bitsize = a->sf ? 64 : 32;
4570 unsigned int ri = a->immr;
4571 unsigned int si = a->imms;
4572 unsigned int pos, len;
4573
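    /* si >= ri is the UBFX/LSR form; si < ri is the UBFIZ/LSL form. */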
4577 if (si >= ri) {
4578 /* Wd<s-r:0> = Wn<s:r> */
4579 len = (si - ri) + 1;
4580 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4581 } else {
4582 /* Wd<32+s-r,32-r> = Wn<s:0> */
4583 len = si + 1;
4584 pos = (bitsize - ri) & (bitsize - 1);
4585 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4586 }
4587 return true;
4588 }
4589
4590 static bool trans_BFM(DisasContext *s, arg_BFM *a)
4591 {
4592 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4593 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4594 unsigned int bitsize = a->sf ? 64 : 32;
4595 unsigned int ri = a->immr;
4596 unsigned int si = a->imms;
4597 unsigned int pos, len;
4598
4602 if (si >= ri) {
4603 /* Wd<s-r:0> = Wn<s:r> */
4604 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4605 len = (si - ri) + 1;
4606 pos = 0;
4607 } else {
4608 /* Wd<32+s-r,32-r> = Wn<s:0> */
4609 len = si + 1;
4610 pos = (bitsize - ri) & (bitsize - 1);
4611 }
4612
4613 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4614 if (!a->sf) {
4615 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4616 }
4617 return true;
4618 }
4619
4620 static bool trans_EXTR(DisasContext *s, arg_extract *a)
4621 {
4622 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4623
4624 tcg_rd = cpu_reg(s, a->rd);
4625
4626 if (unlikely(a->imm == 0)) {
4627 /*
4628 * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4629 * so an extract from bit 0 is a special case.
4630 */
4631 if (a->sf) {
4632 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
4633 } else {
4634 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
4635 }
4636 } else {
4637 tcg_rm = cpu_reg(s, a->rm);
4638 tcg_rn = cpu_reg(s, a->rn);
4639
4640 if (a->sf) {
4641 /* Specialization to ROR happens in EXTRACT2. */
4642 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
4643 } else {
4644 TCGv_i32 t0 = tcg_temp_new_i32();
4645
4646 tcg_gen_extrl_i64_i32(t0, tcg_rm);
4647 if (a->rm == a->rn) {
4648 tcg_gen_rotri_i32(t0, t0, a->imm);
4649 } else {
4650 TCGv_i32 t1 = tcg_temp_new_i32();
4651 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4652 tcg_gen_extract2_i32(t0, t0, t1, a->imm);
4653 }
4654 tcg_gen_extu_i32_i64(tcg_rd, t0);
4655 }
4656 }
4657 return true;
4658 }
4659
4660 /*
4661 * Cryptographic AES, SHA, SHA512
4662 */
4663
4664 TRANS_FEAT(AESE, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aese)
4665 TRANS_FEAT(AESD, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aesd)
4666 TRANS_FEAT(AESMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesmc)
4667 TRANS_FEAT(AESIMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesimc)
4668
4669 TRANS_FEAT(SHA1C, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1c)
4670 TRANS_FEAT(SHA1P, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1p)
4671 TRANS_FEAT(SHA1M, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1m)
4672 TRANS_FEAT(SHA1SU0, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1su0)
4673
4674 TRANS_FEAT(SHA256H, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h)
4675 TRANS_FEAT(SHA256H2, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h2)
4676 TRANS_FEAT(SHA256SU1, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256su1)
4677
4678 TRANS_FEAT(SHA1H, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1h)
4679 TRANS_FEAT(SHA1SU1, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1su1)
4680 TRANS_FEAT(SHA256SU0, aa64_sha256, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha256su0)
4681
4682 TRANS_FEAT(SHA512H, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h)
4683 TRANS_FEAT(SHA512H2, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h2)
4684 TRANS_FEAT(SHA512SU1, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512su1)
4685 TRANS_FEAT(RAX1, aa64_sha3, do_gvec_fn3, a, gen_gvec_rax1)
4686 TRANS_FEAT(SM3PARTW1, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw1)
4687 TRANS_FEAT(SM3PARTW2, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw2)
4688 TRANS_FEAT(SM4EKEY, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4ekey)
4689
4690 TRANS_FEAT(SHA512SU0, aa64_sha512, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha512su0)
4691 TRANS_FEAT(SM4E, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4e)
4692
4693 TRANS_FEAT(EOR3, aa64_sha3, do_gvec_fn4, a, gen_gvec_eor3)
4694 TRANS_FEAT(BCAX, aa64_sha3, do_gvec_fn4, a, gen_gvec_bcax)
4695
4696 static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a)
4697 {
4698 if (!dc_isar_feature(aa64_sm3, s)) {
4699 return false;
4700 }
4701 if (fp_access_check(s)) {
4702 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
4703 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
4704 TCGv_i32 tcg_op3 = tcg_temp_new_i32();
4705 TCGv_i32 tcg_res = tcg_temp_new_i32();
4706 unsigned vsz, dofs;
4707
4708 read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32);
4709 read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32);
4710 read_vec_element_i32(s, tcg_op3, a->ra, 3, MO_32);
4711
4712 tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
4713 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
4714 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
4715 tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
4716
4717 /* Clear the whole register first, then store bits [127:96]. */
4718 vsz = vec_full_reg_size(s);
4719 dofs = vec_full_reg_offset(s, a->rd);
4720 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
4721 write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32);
4722 }
4723 return true;
4724 }
4725
4726 static bool do_crypto3i(DisasContext *s, arg_crypto3i *a, gen_helper_gvec_3 *fn)
4727 {
4728 if (fp_access_check(s)) {
4729 gen_gvec_op3_ool(s, true, a->rd, a->rn, a->rm, a->imm, fn);
4730 }
4731 return true;
4732 }
4733 TRANS_FEAT(SM3TT1A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1a)
4734 TRANS_FEAT(SM3TT1B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1b)
4735 TRANS_FEAT(SM3TT2A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2a)
4736 TRANS_FEAT(SM3TT2B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2b)
4737
4738 static bool trans_XAR(DisasContext *s, arg_XAR *a)
4739 {
4740 if (!dc_isar_feature(aa64_sha3, s)) {
4741 return false;
4742 }
4743 if (fp_access_check(s)) {
4744 gen_gvec_xar(MO_64, vec_full_reg_offset(s, a->rd),
4745 vec_full_reg_offset(s, a->rn),
4746 vec_full_reg_offset(s, a->rm), a->imm, 16,
4747 vec_full_reg_size(s));
4748 }
4749 return true;
4750 }
4751
4752 /*
4753 * Advanced SIMD copy
4754 */
4755
4756 static bool decode_esz_idx(int imm, MemOp *pesz, unsigned *pidx)
4757 {
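    /*
     * The element size is encoded one-hot in the low bits of imm:
     * the position of the lowest set bit gives log2 of the element
     * size, and the bits above it give the element index.
     */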
4758 unsigned esz = ctz32(imm);
4759 if (esz <= MO_64) {
4760 *pesz = esz;
4761 *pidx = imm >> (esz + 1);
4762 return true;
4763 }
4764 return false;
4765 }
4766
4767 static bool trans_DUP_element_s(DisasContext *s, arg_DUP_element_s *a)
4768 {
4769 MemOp esz;
4770 unsigned idx;
4771
4772 if (!decode_esz_idx(a->imm, &esz, &idx)) {
4773 return false;
4774 }
4775 if (fp_access_check(s)) {
4776 /*
4777 * This instruction just extracts the specified element and
4778 * zero-extends it into the bottom of the destination register.
4779 */
4780 TCGv_i64 tmp = tcg_temp_new_i64();
4781 read_vec_element(s, tmp, a->rn, idx, esz);
4782 write_fp_dreg(s, a->rd, tmp);
4783 }
4784 return true;
4785 }
4786
4787 static bool trans_DUP_element_v(DisasContext *s, arg_DUP_element_v *a)
4788 {
4789 MemOp esz;
4790 unsigned idx;
4791
4792 if (!decode_esz_idx(a->imm, &esz, &idx)) {
4793 return false;
4794 }
4795 if (esz == MO_64 && !a->q) {
4796 return false;
4797 }
4798 if (fp_access_check(s)) {
4799 tcg_gen_gvec_dup_mem(esz, vec_full_reg_offset(s, a->rd),
4800 vec_reg_offset(s, a->rn, idx, esz),
4801 a->q ? 16 : 8, vec_full_reg_size(s));
4802 }
4803 return true;
4804 }
4805
4806 static bool trans_DUP_general(DisasContext *s, arg_DUP_general *a)
4807 {
4808 MemOp esz;
4809 unsigned idx;
4810
4811 if (!decode_esz_idx(a->imm, &esz, &idx)) {
4812 return false;
4813 }
4814 if (esz == MO_64 && !a->q) {
4815 return false;
4816 }
4817 if (fp_access_check(s)) {
4818 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
4819 a->q ? 16 : 8, vec_full_reg_size(s),
4820 cpu_reg(s, a->rn));
4821 }
4822 return true;
4823 }
4824
4825 static bool do_smov_umov(DisasContext *s, arg_SMOV *a, MemOp is_signed)
4826 {
4827 MemOp esz;
4828 unsigned idx;
4829
4830 if (!decode_esz_idx(a->imm, &esz, &idx)) {
4831 return false;
4832 }
4833 if (is_signed) {
4834 if (esz == MO_64 || (esz == MO_32 && !a->q)) {
4835 return false;
4836 }
4837 } else {
4838 if (esz == MO_64 ? !a->q : a->q) {
4839 return false;
4840 }
4841 }
4842 if (fp_access_check(s)) {
4843 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4844 read_vec_element(s, tcg_rd, a->rn, idx, esz | is_signed);
4845 if (is_signed && !a->q) {
4846 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4847 }
4848 }
4849 return true;
4850 }
4851
4852 TRANS(SMOV, do_smov_umov, a, MO_SIGN)
4853 TRANS(UMOV, do_smov_umov, a, 0)
4854
4855 static bool trans_INS_general(DisasContext *s, arg_INS_general *a)
4856 {
4857 MemOp esz;
4858 unsigned idx;
4859
4860 if (!decode_esz_idx(a->imm, &esz, &idx)) {
4861 return false;
4862 }
4863 if (fp_access_check(s)) {
4864 write_vec_element(s, cpu_reg(s, a->rn), a->rd, idx, esz);
4865 clear_vec_high(s, true, a->rd);
4866 }
4867 return true;
4868 }
4869
4870 static bool trans_INS_element(DisasContext *s, arg_INS_element *a)
4871 {
4872 MemOp esz;
4873 unsigned didx, sidx;
4874
4875 if (!decode_esz_idx(a->di, &esz, &didx)) {
4876 return false;
4877 }
4878 sidx = a->si >> esz;
4879 if (fp_access_check(s)) {
4880 TCGv_i64 tmp = tcg_temp_new_i64();
4881
4882 read_vec_element(s, tmp, a->rn, sidx, esz);
4883 write_vec_element(s, tmp, a->rd, didx, esz);
4884
4885 /* INS is considered a 128-bit write for SVE. */
4886 clear_vec_high(s, true, a->rd);
4887 }
4888 return true;
4889 }
4890
4891 /*
4892 * Advanced SIMD three same
4893 */
4894
4895 typedef struct FPScalar {
4896 void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
4897 void (*gen_s)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
4898 void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
4899 } FPScalar;
4900
4901 static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f)
4902 {
4903 switch (a->esz) {
4904 case MO_64:
4905 if (fp_access_check(s)) {
4906 TCGv_i64 t0 = read_fp_dreg(s, a->rn);
4907 TCGv_i64 t1 = read_fp_dreg(s, a->rm);
4908 f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
4909 write_fp_dreg(s, a->rd, t0);
4910 }
4911 break;
4912 case MO_32:
4913 if (fp_access_check(s)) {
4914 TCGv_i32 t0 = read_fp_sreg(s, a->rn);
4915 TCGv_i32 t1 = read_fp_sreg(s, a->rm);
4916 f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
4917 write_fp_sreg(s, a->rd, t0);
4918 }
4919 break;
4920 case MO_16:
4921 if (!dc_isar_feature(aa64_fp16, s)) {
4922 return false;
4923 }
4924 if (fp_access_check(s)) {
4925 TCGv_i32 t0 = read_fp_hreg(s, a->rn);
4926 TCGv_i32 t1 = read_fp_hreg(s, a->rm);
4927 f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
4928 write_fp_sreg(s, a->rd, t0);
4929 }
4930 break;
4931 default:
4932 return false;
4933 }
4934 return true;
4935 }
4936
4937 static const FPScalar f_scalar_fadd = {
4938 gen_helper_vfp_addh,
4939 gen_helper_vfp_adds,
4940 gen_helper_vfp_addd,
4941 };
4942 TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd)
4943
4944 static const FPScalar f_scalar_fsub = {
4945 gen_helper_vfp_subh,
4946 gen_helper_vfp_subs,
4947 gen_helper_vfp_subd,
4948 };
4949 TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub)
4950
4951 static const FPScalar f_scalar_fdiv = {
4952 gen_helper_vfp_divh,
4953 gen_helper_vfp_divs,
4954 gen_helper_vfp_divd,
4955 };
4956 TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv)
4957
4958 static const FPScalar f_scalar_fmul = {
4959 gen_helper_vfp_mulh,
4960 gen_helper_vfp_muls,
4961 gen_helper_vfp_muld,
4962 };
4963 TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul)
4964
4965 static const FPScalar f_scalar_fmax = {
4966 gen_helper_advsimd_maxh,
4967 gen_helper_vfp_maxs,
4968 gen_helper_vfp_maxd,
4969 };
4970 TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax)
4971
4972 static const FPScalar f_scalar_fmin = {
4973 gen_helper_advsimd_minh,
4974 gen_helper_vfp_mins,
4975 gen_helper_vfp_mind,
4976 };
4977 TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin)
4978
4979 static const FPScalar f_scalar_fmaxnm = {
4980 gen_helper_advsimd_maxnumh,
4981 gen_helper_vfp_maxnums,
4982 gen_helper_vfp_maxnumd,
4983 };
4984 TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm)
4985
4986 static const FPScalar f_scalar_fminnm = {
4987 gen_helper_advsimd_minnumh,
4988 gen_helper_vfp_minnums,
4989 gen_helper_vfp_minnumd,
4990 };
4991 TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm)
4992
4993 static const FPScalar f_scalar_fmulx = {
4994 gen_helper_advsimd_mulxh,
4995 gen_helper_vfp_mulxs,
4996 gen_helper_vfp_mulxd,
4997 };
4998 TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx)
4999
5000 static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
5001 {
5002 gen_helper_vfp_mulh(d, n, m, s);
5003 gen_vfp_negh(d, d);
5004 }
5005
5006 static void gen_fnmul_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
5007 {
5008 gen_helper_vfp_muls(d, n, m, s);
5009 gen_vfp_negs(d, d);
5010 }
5011
5012 static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
5013 {
5014 gen_helper_vfp_muld(d, n, m, s);
5015 gen_vfp_negd(d, d);
5016 }
5017
5018 static const FPScalar f_scalar_fnmul = {
5019 gen_fnmul_h,
5020 gen_fnmul_s,
5021 gen_fnmul_d,
5022 };
5023 TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul)
5024
5025 static const FPScalar f_scalar_fcmeq = {
5026 gen_helper_advsimd_ceq_f16,
5027 gen_helper_neon_ceq_f32,
5028 gen_helper_neon_ceq_f64,
5029 };
5030 TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq)
5031
5032 static const FPScalar f_scalar_fcmge = {
5033 gen_helper_advsimd_cge_f16,
5034 gen_helper_neon_cge_f32,
5035 gen_helper_neon_cge_f64,
5036 };
5037 TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge)
5038
5039 static const FPScalar f_scalar_fcmgt = {
5040 gen_helper_advsimd_cgt_f16,
5041 gen_helper_neon_cgt_f32,
5042 gen_helper_neon_cgt_f64,
5043 };
5044 TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt)
5045
5046 static const FPScalar f_scalar_facge = {
5047 gen_helper_advsimd_acge_f16,
5048 gen_helper_neon_acge_f32,
5049 gen_helper_neon_acge_f64,
5050 };
5051 TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge)
5052
5053 static const FPScalar f_scalar_facgt = {
5054 gen_helper_advsimd_acgt_f16,
5055 gen_helper_neon_acgt_f32,
5056 gen_helper_neon_acgt_f64,
5057 };
5058 TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt)
5059
5060 static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
5061 {
5062 gen_helper_vfp_subh(d, n, m, s);
5063 gen_vfp_absh(d, d);
5064 }
5065
5066 static void gen_fabd_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
5067 {
5068 gen_helper_vfp_subs(d, n, m, s);
5069 gen_vfp_abss(d, d);
5070 }
5071
5072 static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
5073 {
5074 gen_helper_vfp_subd(d, n, m, s);
5075 gen_vfp_absd(d, d);
5076 }
5077
5078 static const FPScalar f_scalar_fabd = {
5079 gen_fabd_h,
5080 gen_fabd_s,
5081 gen_fabd_d,
5082 };
5083 TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd)
5084
5085 static const FPScalar f_scalar_frecps = {
5086 gen_helper_recpsf_f16,
5087 gen_helper_recpsf_f32,
5088 gen_helper_recpsf_f64,
5089 };
5090 TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps)
5091
5092 static const FPScalar f_scalar_frsqrts = {
5093 gen_helper_rsqrtsf_f16,
5094 gen_helper_rsqrtsf_f32,
5095 gen_helper_rsqrtsf_f64,
5096 };
5097 TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts)
5098
5099 static bool do_satacc_s(DisasContext *s, arg_rrr_e *a,
5100 MemOp sgn_n, MemOp sgn_m,
5101 void (*gen_bhs)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, MemOp),
5102 void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5103 {
5104 TCGv_i64 t0, t1, t2, qc;
5105 MemOp esz = a->esz;
5106
5107 if (!fp_access_check(s)) {
5108 return true;
5109 }
5110
5111 t0 = tcg_temp_new_i64();
5112 t1 = tcg_temp_new_i64();
5113 t2 = tcg_temp_new_i64();
5114 qc = tcg_temp_new_i64();
5115 read_vec_element(s, t1, a->rn, 0, esz | sgn_n);
5116 read_vec_element(s, t2, a->rm, 0, esz | sgn_m);
5117 tcg_gen_ld_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));
5118
5119 if (esz == MO_64) {
5120 gen_d(t0, qc, t1, t2);
5121 } else {
5122 gen_bhs(t0, qc, t1, t2, esz);
5123 tcg_gen_ext_i64(t0, t0, esz);
5124 }
5125
5126 write_fp_dreg(s, a->rd, t0);
5127 tcg_gen_st_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));
5128 return true;
5129 }
5130
5131 TRANS(SQADD_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqadd_bhs, gen_sqadd_d)
5132 TRANS(SQSUB_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqsub_bhs, gen_sqsub_d)
5133 TRANS(UQADD_s, do_satacc_s, a, 0, 0, gen_uqadd_bhs, gen_uqadd_d)
5134 TRANS(UQSUB_s, do_satacc_s, a, 0, 0, gen_uqsub_bhs, gen_uqsub_d)
5135 TRANS(SUQADD_s, do_satacc_s, a, MO_SIGN, 0, gen_suqadd_bhs, gen_suqadd_d)
5136 TRANS(USQADD_s, do_satacc_s, a, 0, MO_SIGN, gen_usqadd_bhs, gen_usqadd_d)
5137
5138 static bool do_int3_scalar_d(DisasContext *s, arg_rrr_e *a,
5139 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
5140 {
5141 if (fp_access_check(s)) {
5142 TCGv_i64 t0 = tcg_temp_new_i64();
5143 TCGv_i64 t1 = tcg_temp_new_i64();
5144
5145 read_vec_element(s, t0, a->rn, 0, MO_64);
5146 read_vec_element(s, t1, a->rm, 0, MO_64);
5147 fn(t0, t0, t1);
5148 write_fp_dreg(s, a->rd, t0);
5149 }
5150 return true;
5151 }
5152
5153 TRANS(SSHL_s, do_int3_scalar_d, a, gen_sshl_i64)
5154 TRANS(USHL_s, do_int3_scalar_d, a, gen_ushl_i64)
5155 TRANS(SRSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_s64)
5156 TRANS(URSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_u64)
5157 TRANS(ADD_s, do_int3_scalar_d, a, tcg_gen_add_i64)
5158 TRANS(SUB_s, do_int3_scalar_d, a, tcg_gen_sub_i64)
5159
5160 typedef struct ENVScalar2 {
5161 NeonGenTwoOpEnvFn *gen_bhs[3];
5162 NeonGenTwo64OpEnvFn *gen_d;
5163 } ENVScalar2;
5164
5165 static bool do_env_scalar2(DisasContext *s, arg_rrr_e *a, const ENVScalar2 *f)
5166 {
5167 if (!fp_access_check(s)) {
5168 return true;
5169 }
5170 if (a->esz == MO_64) {
5171 TCGv_i64 t0 = read_fp_dreg(s, a->rn);
5172 TCGv_i64 t1 = read_fp_dreg(s, a->rm);
5173 f->gen_d(t0, tcg_env, t0, t1);
5174 write_fp_dreg(s, a->rd, t0);
5175 } else {
5176 TCGv_i32 t0 = tcg_temp_new_i32();
5177 TCGv_i32 t1 = tcg_temp_new_i32();
5178
5179 read_vec_element_i32(s, t0, a->rn, 0, a->esz);
5180 read_vec_element_i32(s, t1, a->rm, 0, a->esz);
5181 f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
5182 write_fp_sreg(s, a->rd, t0);
5183 }
5184 return true;
5185 }
5186
5187 static const ENVScalar2 f_scalar_sqshl = {
5188 { gen_helper_neon_qshl_s8,
5189 gen_helper_neon_qshl_s16,
5190 gen_helper_neon_qshl_s32 },
5191 gen_helper_neon_qshl_s64,
5192 };
5193 TRANS(SQSHL_s, do_env_scalar2, a, &f_scalar_sqshl)
5194
5195 static const ENVScalar2 f_scalar_uqshl = {
5196 { gen_helper_neon_qshl_u8,
5197 gen_helper_neon_qshl_u16,
5198 gen_helper_neon_qshl_u32 },
5199 gen_helper_neon_qshl_u64,
5200 };
5201 TRANS(UQSHL_s, do_env_scalar2, a, &f_scalar_uqshl)
5202
5203 static const ENVScalar2 f_scalar_sqrshl = {
5204 { gen_helper_neon_qrshl_s8,
5205 gen_helper_neon_qrshl_s16,
5206 gen_helper_neon_qrshl_s32 },
5207 gen_helper_neon_qrshl_s64,
5208 };
5209 TRANS(SQRSHL_s, do_env_scalar2, a, &f_scalar_sqrshl)
5210
5211 static const ENVScalar2 f_scalar_uqrshl = {
5212 { gen_helper_neon_qrshl_u8,
5213 gen_helper_neon_qrshl_u16,
5214 gen_helper_neon_qrshl_u32 },
5215 gen_helper_neon_qrshl_u64,
5216 };
5217 TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl)
5218
5219 static bool do_env_scalar2_hs(DisasContext *s, arg_rrr_e *a,
5220 const ENVScalar2 *f)
5221 {
5222 if (a->esz == MO_16 || a->esz == MO_32) {
5223 return do_env_scalar2(s, a, f);
5224 }
5225 return false;
5226 }
5227
5228 static const ENVScalar2 f_scalar_sqdmulh = {
5229 { NULL, gen_helper_neon_qdmulh_s16, gen_helper_neon_qdmulh_s32 }
5230 };
5231 TRANS(SQDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqdmulh)
5232
5233 static const ENVScalar2 f_scalar_sqrdmulh = {
5234 { NULL, gen_helper_neon_qrdmulh_s16, gen_helper_neon_qrdmulh_s32 }
5235 };
5236 TRANS(SQRDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqrdmulh)
5237
do_cmop_d(DisasContext * s,arg_rrr_e * a,TCGCond cond)5238 static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond)
5239 {
5240 if (fp_access_check(s)) {
5241 TCGv_i64 t0 = read_fp_dreg(s, a->rn);
5242 TCGv_i64 t1 = read_fp_dreg(s, a->rm);
5243 tcg_gen_negsetcond_i64(cond, t0, t0, t1);
5244 write_fp_dreg(s, a->rd, t0);
5245 }
5246 return true;
5247 }
5248
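/*
 * The scalar integer compares produce all-ones for true and zero for
 * false; tcg_gen_negsetcond_i64 gives us exactly that.
 */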
TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT)
TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU)
TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE)
TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)

static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
                          gen_helper_gvec_3_ptr * const fns[3])
{
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
                          esz == MO_16, 0, fns[esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3_ptr * const f_vector_fadd[3] = {
    gen_helper_gvec_fadd_h,
    gen_helper_gvec_fadd_s,
    gen_helper_gvec_fadd_d,
};
TRANS(FADD_v, do_fp3_vector, a, f_vector_fadd)

static gen_helper_gvec_3_ptr * const f_vector_fsub[3] = {
    gen_helper_gvec_fsub_h,
    gen_helper_gvec_fsub_s,
    gen_helper_gvec_fsub_d,
};
TRANS(FSUB_v, do_fp3_vector, a, f_vector_fsub)

static gen_helper_gvec_3_ptr * const f_vector_fdiv[3] = {
    gen_helper_gvec_fdiv_h,
    gen_helper_gvec_fdiv_s,
    gen_helper_gvec_fdiv_d,
};
TRANS(FDIV_v, do_fp3_vector, a, f_vector_fdiv)

static gen_helper_gvec_3_ptr * const f_vector_fmul[3] = {
    gen_helper_gvec_fmul_h,
    gen_helper_gvec_fmul_s,
    gen_helper_gvec_fmul_d,
};
TRANS(FMUL_v, do_fp3_vector, a, f_vector_fmul)

static gen_helper_gvec_3_ptr * const f_vector_fmax[3] = {
    gen_helper_gvec_fmax_h,
    gen_helper_gvec_fmax_s,
    gen_helper_gvec_fmax_d,
};
TRANS(FMAX_v, do_fp3_vector, a, f_vector_fmax)

static gen_helper_gvec_3_ptr * const f_vector_fmin[3] = {
    gen_helper_gvec_fmin_h,
    gen_helper_gvec_fmin_s,
    gen_helper_gvec_fmin_d,
};
TRANS(FMIN_v, do_fp3_vector, a, f_vector_fmin)

static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[3] = {
    gen_helper_gvec_fmaxnum_h,
    gen_helper_gvec_fmaxnum_s,
    gen_helper_gvec_fmaxnum_d,
};
TRANS(FMAXNM_v, do_fp3_vector, a, f_vector_fmaxnm)

static gen_helper_gvec_3_ptr * const f_vector_fminnm[3] = {
    gen_helper_gvec_fminnum_h,
    gen_helper_gvec_fminnum_s,
    gen_helper_gvec_fminnum_d,
};
TRANS(FMINNM_v, do_fp3_vector, a, f_vector_fminnm)

static gen_helper_gvec_3_ptr * const f_vector_fmulx[3] = {
    gen_helper_gvec_fmulx_h,
    gen_helper_gvec_fmulx_s,
    gen_helper_gvec_fmulx_d,
};
TRANS(FMULX_v, do_fp3_vector, a, f_vector_fmulx)

static gen_helper_gvec_3_ptr * const f_vector_fmla[3] = {
    gen_helper_gvec_vfma_h,
    gen_helper_gvec_vfma_s,
    gen_helper_gvec_vfma_d,
};
TRANS(FMLA_v, do_fp3_vector, a, f_vector_fmla)

static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = {
    gen_helper_gvec_vfms_h,
    gen_helper_gvec_vfms_s,
    gen_helper_gvec_vfms_d,
};
TRANS(FMLS_v, do_fp3_vector, a, f_vector_fmls)

static gen_helper_gvec_3_ptr * const f_vector_fcmeq[3] = {
    gen_helper_gvec_fceq_h,
    gen_helper_gvec_fceq_s,
    gen_helper_gvec_fceq_d,
};
TRANS(FCMEQ_v, do_fp3_vector, a, f_vector_fcmeq)

static gen_helper_gvec_3_ptr * const f_vector_fcmge[3] = {
    gen_helper_gvec_fcge_h,
    gen_helper_gvec_fcge_s,
    gen_helper_gvec_fcge_d,
};
TRANS(FCMGE_v, do_fp3_vector, a, f_vector_fcmge)

static gen_helper_gvec_3_ptr * const f_vector_fcmgt[3] = {
    gen_helper_gvec_fcgt_h,
    gen_helper_gvec_fcgt_s,
    gen_helper_gvec_fcgt_d,
};
TRANS(FCMGT_v, do_fp3_vector, a, f_vector_fcmgt)

static gen_helper_gvec_3_ptr * const f_vector_facge[3] = {
    gen_helper_gvec_facge_h,
    gen_helper_gvec_facge_s,
    gen_helper_gvec_facge_d,
};
TRANS(FACGE_v, do_fp3_vector, a, f_vector_facge)

static gen_helper_gvec_3_ptr * const f_vector_facgt[3] = {
    gen_helper_gvec_facgt_h,
    gen_helper_gvec_facgt_s,
    gen_helper_gvec_facgt_d,
};
TRANS(FACGT_v, do_fp3_vector, a, f_vector_facgt)

static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = {
    gen_helper_gvec_fabd_h,
    gen_helper_gvec_fabd_s,
    gen_helper_gvec_fabd_d,
};
TRANS(FABD_v, do_fp3_vector, a, f_vector_fabd)

static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = {
    gen_helper_gvec_recps_h,
    gen_helper_gvec_recps_s,
    gen_helper_gvec_recps_d,
};
TRANS(FRECPS_v, do_fp3_vector, a, f_vector_frecps)

static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = {
    gen_helper_gvec_rsqrts_h,
    gen_helper_gvec_rsqrts_s,
    gen_helper_gvec_rsqrts_d,
};
TRANS(FRSQRTS_v, do_fp3_vector, a, f_vector_frsqrts)

static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = {
    gen_helper_gvec_faddp_h,
    gen_helper_gvec_faddp_s,
    gen_helper_gvec_faddp_d,
};
TRANS(FADDP_v, do_fp3_vector, a, f_vector_faddp)

static gen_helper_gvec_3_ptr * const f_vector_fmaxp[3] = {
    gen_helper_gvec_fmaxp_h,
    gen_helper_gvec_fmaxp_s,
    gen_helper_gvec_fmaxp_d,
};
TRANS(FMAXP_v, do_fp3_vector, a, f_vector_fmaxp)

static gen_helper_gvec_3_ptr * const f_vector_fminp[3] = {
    gen_helper_gvec_fminp_h,
    gen_helper_gvec_fminp_s,
    gen_helper_gvec_fminp_d,
};
TRANS(FMINP_v, do_fp3_vector, a, f_vector_fminp)

static gen_helper_gvec_3_ptr * const f_vector_fmaxnmp[3] = {
    gen_helper_gvec_fmaxnump_h,
    gen_helper_gvec_fmaxnump_s,
    gen_helper_gvec_fmaxnump_d,
};
TRANS(FMAXNMP_v, do_fp3_vector, a, f_vector_fmaxnmp)

static gen_helper_gvec_3_ptr * const f_vector_fminnmp[3] = {
    gen_helper_gvec_fminnump_h,
    gen_helper_gvec_fminnump_s,
    gen_helper_gvec_fminnump_d,
};
TRANS(FMINNMP_v, do_fp3_vector, a, f_vector_fminnmp)

static bool do_fmlal(DisasContext *s, arg_qrrr_e *a, bool is_s, bool is_2)
{
    if (fp_access_check(s)) {
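        /*
         * The helper's 'data' argument packs: bit 0 = subtract (FMLSL),
         * bit 1 = select the second half of the input vector (FMLAL2).
         */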
        int data = (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm), tcg_env,
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_a64);
    }
    return true;
}

TRANS_FEAT(FMLAL_v, aa64_fhm, do_fmlal, a, false, false)
TRANS_FEAT(FMLSL_v, aa64_fhm, do_fmlal, a, true, false)
TRANS_FEAT(FMLAL2_v, aa64_fhm, do_fmlal, a, false, true)
TRANS_FEAT(FMLSL2_v, aa64_fhm, do_fmlal, a, true, true)

TRANS(ADDP_v, do_gvec_fn3, a, gen_gvec_addp)
TRANS(SMAXP_v, do_gvec_fn3_no64, a, gen_gvec_smaxp)
TRANS(SMINP_v, do_gvec_fn3_no64, a, gen_gvec_sminp)
TRANS(UMAXP_v, do_gvec_fn3_no64, a, gen_gvec_umaxp)
TRANS(UMINP_v, do_gvec_fn3_no64, a, gen_gvec_uminp)

TRANS(AND_v, do_gvec_fn3, a, tcg_gen_gvec_and)
TRANS(BIC_v, do_gvec_fn3, a, tcg_gen_gvec_andc)
TRANS(ORR_v, do_gvec_fn3, a, tcg_gen_gvec_or)
TRANS(ORN_v, do_gvec_fn3, a, tcg_gen_gvec_orc)
TRANS(EOR_v, do_gvec_fn3, a, tcg_gen_gvec_xor)

static bool do_bitsel(DisasContext *s, bool is_q, int d, int a, int b, int c)
{
    if (fp_access_check(s)) {
        gen_gvec_fn4(s, is_q, d, a, b, c, tcg_gen_gvec_bitsel, 0);
    }
    return true;
}

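/*
 * BSL, BIT and BIF are all bitwise selects; they differ only in which
 * register supplies the selector mask and which two supply the data.
 */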
TRANS(BSL_v, do_bitsel, a->q, a->rd, a->rd, a->rn, a->rm)
TRANS(BIT_v, do_bitsel, a->q, a->rd, a->rm, a->rn, a->rd)
TRANS(BIF_v, do_bitsel, a->q, a->rd, a->rm, a->rd, a->rn)

TRANS(SQADD_v, do_gvec_fn3, a, gen_gvec_sqadd_qc)
TRANS(UQADD_v, do_gvec_fn3, a, gen_gvec_uqadd_qc)
TRANS(SQSUB_v, do_gvec_fn3, a, gen_gvec_sqsub_qc)
TRANS(UQSUB_v, do_gvec_fn3, a, gen_gvec_uqsub_qc)
TRANS(SUQADD_v, do_gvec_fn3, a, gen_gvec_suqadd_qc)
TRANS(USQADD_v, do_gvec_fn3, a, gen_gvec_usqadd_qc)

TRANS(SSHL_v, do_gvec_fn3, a, gen_gvec_sshl)
TRANS(USHL_v, do_gvec_fn3, a, gen_gvec_ushl)
TRANS(SRSHL_v, do_gvec_fn3, a, gen_gvec_srshl)
TRANS(URSHL_v, do_gvec_fn3, a, gen_gvec_urshl)
TRANS(SQSHL_v, do_gvec_fn3, a, gen_neon_sqshl)
TRANS(UQSHL_v, do_gvec_fn3, a, gen_neon_uqshl)
TRANS(SQRSHL_v, do_gvec_fn3, a, gen_neon_sqrshl)
TRANS(UQRSHL_v, do_gvec_fn3, a, gen_neon_uqrshl)

TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add)
TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub)
TRANS(SHADD_v, do_gvec_fn3_no64, a, gen_gvec_shadd)
TRANS(UHADD_v, do_gvec_fn3_no64, a, gen_gvec_uhadd)
TRANS(SHSUB_v, do_gvec_fn3_no64, a, gen_gvec_shsub)
TRANS(UHSUB_v, do_gvec_fn3_no64, a, gen_gvec_uhsub)
TRANS(SRHADD_v, do_gvec_fn3_no64, a, gen_gvec_srhadd)
TRANS(URHADD_v, do_gvec_fn3_no64, a, gen_gvec_urhadd)
TRANS(SMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smax)
TRANS(UMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umax)
TRANS(SMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smin)
TRANS(UMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umin)
TRANS(SABA_v, do_gvec_fn3_no64, a, gen_gvec_saba)
TRANS(UABA_v, do_gvec_fn3_no64, a, gen_gvec_uaba)
TRANS(SABD_v, do_gvec_fn3_no64, a, gen_gvec_sabd)
TRANS(UABD_v, do_gvec_fn3_no64, a, gen_gvec_uabd)
TRANS(MUL_v, do_gvec_fn3_no64, a, tcg_gen_gvec_mul)
TRANS(PMUL_v, do_gvec_op3_ool, a, 0, gen_helper_gvec_pmul_b)
TRANS(MLA_v, do_gvec_fn3_no64, a, gen_gvec_mla)
TRANS(MLS_v, do_gvec_fn3_no64, a, gen_gvec_mls)

static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond)
{
    if (a->esz == MO_64 && !a->q) {
        return false;
    }
    if (fp_access_check(s)) {
        tcg_gen_gvec_cmp(cond, a->esz,
                         vec_full_reg_offset(s, a->rd),
                         vec_full_reg_offset(s, a->rn),
                         vec_full_reg_offset(s, a->rm),
                         a->q ? 16 : 8, vec_full_reg_size(s));
    }
    return true;
}

TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT)
TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU)
TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE)
TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU)
TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ)
TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst)

TRANS(SQDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqdmulh_qc)
TRANS(SQRDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmulh_qc)

/*
 * Advanced SIMD scalar/vector x indexed element
 */

static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rn);
            TCGv_i64 t1 = tcg_temp_new_i64();

            read_vec_element(s, t1, a->rm, a->idx, MO_64);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rn);
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t1, a->rm, a->idx, MO_32);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rn);
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t1, a->rm, a->idx, MO_16);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FMUL_si, do_fp3_scalar_idx, a, &f_scalar_fmul)
TRANS(FMULX_si, do_fp3_scalar_idx, a, &f_scalar_fmulx)

static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rd);
            TCGv_i64 t1 = read_fp_dreg(s, a->rn);
            TCGv_i64 t2 = tcg_temp_new_i64();

            read_vec_element(s, t2, a->rm, a->idx, MO_64);
            if (neg) {
                gen_vfp_negd(t1, t1);
            }
            gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rd);
            TCGv_i32 t1 = read_fp_sreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_32);
            if (neg) {
                gen_vfp_negs(t1, t1);
            }
            gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rd);
            TCGv_i32 t1 = read_fp_hreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_16);
            if (neg) {
                gen_vfp_negh(t1, t1);
            }
            gen_helper_advsimd_muladdh(t0, t1, t2, t0,
                                       fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FMLA_si, do_fmla_scalar_idx, a, false)
TRANS(FMLS_si, do_fmla_scalar_idx, a, true)

static bool do_env_scalar2_idx_hs(DisasContext *s, arg_rrx_e *a,
                                  const ENVScalar2 *f)
{
    if (a->esz < MO_16 || a->esz > MO_32) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
        f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

TRANS(SQDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqdmulh)
TRANS(SQRDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqrdmulh)

static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                              gen_helper_gvec_3_ptr * const fns[3])
{
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
                          esz == MO_16, a->idx, fns[esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3_ptr * const f_vector_idx_fmul[3] = {
    gen_helper_gvec_fmul_idx_h,
    gen_helper_gvec_fmul_idx_s,
    gen_helper_gvec_fmul_idx_d,
};
TRANS(FMUL_vi, do_fp3_vector_idx, a, f_vector_idx_fmul)

static gen_helper_gvec_3_ptr * const f_vector_idx_fmulx[3] = {
    gen_helper_gvec_fmulx_idx_h,
    gen_helper_gvec_fmulx_idx_s,
    gen_helper_gvec_fmulx_idx_d,
};
TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx)

static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_gvec_fmla_idx_h,
        gen_helper_gvec_fmla_idx_s,
        gen_helper_gvec_fmla_idx_d,
    };
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
                          esz == MO_16, (a->idx << 1) | neg,
                          fns[esz - 1]);
    }
    return true;
}

TRANS(FMLA_vi, do_fmla_vector_idx, a, false)
TRANS(FMLS_vi, do_fmla_vector_idx, a, true)

static bool do_fmlal_idx(DisasContext *s, arg_qrrx_e *a, bool is_s, bool is_2)
{
    if (fp_access_check(s)) {
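        /* As for do_fmlal's 'data', with the element index in bits [n:2]. */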
        int data = (a->idx << 2) | (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm), tcg_env,
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_idx_a64);
    }
    return true;
}

TRANS_FEAT(FMLAL_vi, aa64_fhm, do_fmlal_idx, a, false, false)
TRANS_FEAT(FMLSL_vi, aa64_fhm, do_fmlal_idx, a, true, false)
TRANS_FEAT(FMLAL2_vi, aa64_fhm, do_fmlal_idx, a, false, true)
TRANS_FEAT(FMLSL2_vi, aa64_fhm, do_fmlal_idx, a, true, true)

static bool do_int3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                               gen_helper_gvec_3 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3 * const f_vector_idx_mul[2] = {
    gen_helper_gvec_mul_idx_h,
    gen_helper_gvec_mul_idx_s,
};
TRANS(MUL_vi, do_int3_vector_idx, a, f_vector_idx_mul)

static bool do_mla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool sub)
{
    static gen_helper_gvec_4 * const fns[2][2] = {
        { gen_helper_gvec_mla_idx_h, gen_helper_gvec_mls_idx_h },
        { gen_helper_gvec_mla_idx_s, gen_helper_gvec_mls_idx_s },
    };

    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd,
                         a->idx, fns[a->esz - 1][sub]);
    }
    return true;
}

TRANS(MLA_vi, do_mla_vector_idx, a, false)
TRANS(MLS_vi, do_mla_vector_idx, a, true)

static bool do_int3_qc_vector_idx(DisasContext *s, arg_qrrx_e *a,
                                  gen_helper_gvec_4 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           offsetof(CPUARMState, vfp.qc),
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_4 * const f_vector_idx_sqdmulh[2] = {
    gen_helper_neon_sqdmulh_idx_h,
    gen_helper_neon_sqdmulh_idx_s,
};
TRANS(SQDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqdmulh)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmulh[2] = {
    gen_helper_neon_sqrdmulh_idx_h,
    gen_helper_neon_sqrdmulh_idx_s,
};
TRANS(SQRDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqrdmulh)

/*
 * Advanced SIMD scalar pairwise
 */

static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = tcg_temp_new_i64();
            TCGv_i64 t1 = tcg_temp_new_i64();

            read_vec_element(s, t0, a->rn, 0, MO_64);
            read_vec_element(s, t1, a->rn, 1, MO_64);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_32);
            read_vec_element_i32(s, t1, a->rn, 1, MO_32);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_16);
            read_vec_element_i32(s, t1, a->rn, 1, MO_16);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd)
TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax)
TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin)
TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm)
TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm)

static bool trans_ADDP_s(DisasContext *s, arg_rr_e *a)
{
    if (fp_access_check(s)) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        read_vec_element(s, t0, a->rn, 0, MO_64);
        read_vec_element(s, t1, a->rn, 1, MO_64);
        tcg_gen_add_i64(t0, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    }
    return true;
}

/*
 * Floating-point conditional select
 */

static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
{
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;

    switch (a->esz) {
    case MO_32:
    case MO_64:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, a->rn, 0, a->esz);
    read_vec_element(s, t_false, a->rm, 0, a->esz);

    a64_test_cc(&c, a->cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /*
     * Note that sregs & hregs write back zeros to the high bits,
     * and we've already done the zero-extension.
     */
    write_fp_dreg(s, a->rd, t_true);
    return true;
}

/*
 * Floating-point data-processing (3 source)
 */

static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
{
    TCGv_ptr fpst;

    /*
     * These are fused multiply-add. Note that doing the negations here
     * as separate steps is correct: an input NaN should come out with
     * its sign bit flipped if it is a negated input.
     */
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 tn = read_fp_dreg(s, a->rn);
            TCGv_i64 tm = read_fp_dreg(s, a->rm);
            TCGv_i64 ta = read_fp_dreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negd(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negd(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
            write_fp_dreg(s, a->rd, ta);
        }
        break;

    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_sreg(s, a->rn);
            TCGv_i32 tm = read_fp_sreg(s, a->rm);
            TCGv_i32 ta = read_fp_sreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negs(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negs(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladds(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;

    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_hreg(s, a->rn);
            TCGv_i32 tm = read_fp_hreg(s, a->rm);
            TCGv_i32 ta = read_fp_hreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negh(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negh(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR_F16);
            gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;

    default:
        return false;
    }
    return true;
}

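/*
 * The (neg_a, neg_n) pairs below give:
 *   FMADD : rd =  ra + rn * rm
 *   FMSUB : rd =  ra - rn * rm
 *   FNMADD: rd = -ra - rn * rm
 *   FNMSUB: rd = -ra + rn * rm
 */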
TRANS(FMADD, do_fmadd, a, false, false)
TRANS(FNMADD, do_fmadd, a, true, true)
TRANS(FMSUB, do_fmadd, a, false, true)
TRANS(FNMSUB, do_fmadd, a, true, false)

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and to provide the
 * ARM-mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        g_assert_not_reached(); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23 22 21 20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
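/* For example, 0xaa0103e2 is ORR x2, xzr, x1, ie the MOV x2, x1 alias. */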
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        g_assert_not_reached();
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
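/* eg ADD x0, sp, w1, UXTW #2 computes x0 = sp + (ZeroExtend(w1) << 2). */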
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag-setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
                (extract32(insn, 21, 3) << 1) |
                extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
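    /* eg SMULH has sf=1 op54=00 op31=010 o0=0, giving op_id 0x44. */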
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}

/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9  5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1 1 0 1 0 0 0 0        |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}

/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
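/*
 * RMIF: rotate Xn right by imm6, then for each bit set in mask copy
 * bits [3:0] of the rotated result into N, Z, C and V respectively.
 */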
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}

/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
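/*
 * SETF8/SETF16 set NZV from the bottom 8/16 bits of Xn: N is the top
 * bit of the truncated value, Z is set iff the truncated value is
 * zero, and V is the xor of the two bits either side of the
 * truncation point. C is left unchanged.
 */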
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}

/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11 10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1 1 0 1 0 0 1 0        |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *        [1]                                          y   [0]       [0]
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND. */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison. */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison. */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv. Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}

/* Conditional select
 *  31  30 29 28            21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
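/*
 * op selects inversion and op2<0> increment of the 'false' input:
 * CSEL (neither), CSINC, CSINV, CSNEG (both). CSET and CSETM are the
 * Rn = Rm = XZR special cases handled below.
 */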
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM. */
        if (else_inv) {
            tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
                                   tcg_rd, c.value, zero);
        } else {
            tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
                                tcg_rd, c.value, zero);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}

/* REV16 (opcode==1) */
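/*
 * Swap the bytes within each halfword: mask out the even bytes and the
 * odd bytes (shifted down by 8), then recombine with the even bytes
 * moved up by 8.
 */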
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}

/* Data-processing (1 source)
 *  31  30 29 28            21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}

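/*
 * UDIV, SDIV. Division by zero yields 0 and the INT_MIN / -1 signed
 * overflow case wraps, as the architecture requires; the helpers
 * implement those rules.
 */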
handle_div(DisasContext * s,bool is_signed,unsigned int sf,unsigned int rm,unsigned int rn,unsigned int rd)6976 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
6977 unsigned int rm, unsigned int rn, unsigned int rd)
6978 {
6979 TCGv_i64 tcg_n, tcg_m, tcg_rd;
6980 tcg_rd = cpu_reg(s, rd);
6981
6982 if (!sf && is_signed) {
6983 tcg_n = tcg_temp_new_i64();
6984 tcg_m = tcg_temp_new_i64();
6985 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
6986 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
6987 } else {
6988 tcg_n = read_cpu_reg(s, rn, sf);
6989 tcg_m = read_cpu_reg(s, rm, sf);
6990 }
6991
6992 if (is_signed) {
6993 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
6994 } else {
6995 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
6996 }
6997
6998 if (!sf) { /* zero extend final result */
6999 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
7000 }
7001 }
7002
7003 /* LSLV, LSRV, ASRV, RORV */
handle_shift_reg(DisasContext * s,enum a64_shift_type shift_type,unsigned int sf,unsigned int rm,unsigned int rn,unsigned int rd)7004 static void handle_shift_reg(DisasContext *s,
7005 enum a64_shift_type shift_type, unsigned int sf,
7006 unsigned int rm, unsigned int rn, unsigned int rd)
7007 {
7008 TCGv_i64 tcg_shift = tcg_temp_new_i64();
7009 TCGv_i64 tcg_rd = cpu_reg(s, rd);
7010 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
7011
7012 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
7013 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
7014 }
7015
7016 /* CRC32[BHWX], CRC32C[BHWX] */
handle_crc32(DisasContext * s,unsigned int sf,unsigned int sz,bool crc32c,unsigned int rm,unsigned int rn,unsigned int rd)7017 static void handle_crc32(DisasContext *s,
7018 unsigned int sf, unsigned int sz, bool crc32c,
7019 unsigned int rm, unsigned int rn, unsigned int rd)
7020 {
7021 TCGv_i64 tcg_acc, tcg_val;
7022 TCGv_i32 tcg_bytes;
7023
7024 if (!dc_isar_feature(aa64_crc32, s)
7025 || (sf == 1 && sz != 3)
7026 || (sf == 0 && sz == 3)) {
7027 unallocated_encoding(s);
7028 return;
7029 }
7030
7031 if (sz == 3) {
7032 tcg_val = cpu_reg(s, rm);
7033 } else {
7034 uint64_t mask;
7035 switch (sz) {
7036 case 0:
7037 mask = 0xFF;
7038 break;
7039 case 1:
7040 mask = 0xFFFF;
7041 break;
7042 case 2:
7043 mask = 0xFFFFFFFF;
7044 break;
7045 default:
7046 g_assert_not_reached();
7047 }
7048 tcg_val = tcg_temp_new_i64();
7049 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
7050 }
7051
7052 tcg_acc = cpu_reg(s, rn);
7053 tcg_bytes = tcg_constant_i32(1 << sz);
7054
7055 if (crc32c) {
7056 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
7057 } else {
7058 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
7059 }
7060 }
7061
7062 /* Data-processing (2 source)
7063 * 31 30 29 28 21 20 16 15 10 9 5 4 0
7064 * +----+---+---+-----------------+------+--------+------+------+
7065 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
7066 * +----+---+---+-----------------+------+--------+------+------+
7067 */
7068 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
7069 {
7070 unsigned int sf, rm, opcode, rn, rd, setflag;
7071 sf = extract32(insn, 31, 1);
7072 setflag = extract32(insn, 29, 1);
7073 rm = extract32(insn, 16, 5);
7074 opcode = extract32(insn, 10, 6);
7075 rn = extract32(insn, 5, 5);
7076 rd = extract32(insn, 0, 5);
7077
7078 if (setflag && opcode != 0) {
7079 unallocated_encoding(s);
7080 return;
7081 }
7082
7083 switch (opcode) {
7084 case 0: /* SUBP(S) */
7085 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
7086 goto do_unallocated;
7087 } else {
7088 TCGv_i64 tcg_n, tcg_m, tcg_d;
7089
7090 tcg_n = read_cpu_reg_sp(s, rn, true);
7091 tcg_m = read_cpu_reg_sp(s, rm, true);
7092 tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
7093 tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
7094 tcg_d = cpu_reg(s, rd);
7095
7096 if (setflag) {
7097 gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
7098 } else {
7099 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
7100 }
7101 }
7102 break;
7103 case 2: /* UDIV */
7104 handle_div(s, false, sf, rm, rn, rd);
7105 break;
7106 case 3: /* SDIV */
7107 handle_div(s, true, sf, rm, rn, rd);
7108 break;
7109 case 4: /* IRG */
7110 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
7111 goto do_unallocated;
7112 }
7113 if (s->ata[0]) {
7114 gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
7115 cpu_reg_sp(s, rn), cpu_reg(s, rm));
7116 } else {
7117 gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
7118 cpu_reg_sp(s, rn));
7119 }
7120 break;
7121 case 5: /* GMI */
7122 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
7123 goto do_unallocated;
7124 } else {
7125 TCGv_i64 t = tcg_temp_new_i64();
7126
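            /* GMI: Xd = Xm with the bit selected by the allocation tag
             * in bits [59:56] of Xn|SP also set.
             */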
7127 tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
7128 tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
7129 tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
7130 }
7131 break;
7132 case 8: /* LSLV */
7133 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
7134 break;
7135 case 9: /* LSRV */
7136 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
7137 break;
7138 case 10: /* ASRV */
7139 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
7140 break;
7141 case 11: /* RORV */
7142 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
7143 break;
7144 case 12: /* PACGA */
7145 if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
7146 goto do_unallocated;
7147 }
7148 gen_helper_pacga(cpu_reg(s, rd), tcg_env,
7149 cpu_reg(s, rn), cpu_reg_sp(s, rm));
7150 break;
7151 case 16:
7152 case 17:
7153 case 18:
7154 case 19:
7155 case 20:
7156 case 21:
7157 case 22:
7158 case 23: /* CRC32 */
7159 {
7160 int sz = extract32(opcode, 0, 2);
7161 bool crc32c = extract32(opcode, 2, 1);
7162 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
7163 break;
7164 }
7165 default:
7166 do_unallocated:
7167 unallocated_encoding(s);
7168 break;
7169 }
7170 }
7171
7172 /*
7173 * Data processing - register
7174 * 31 30 29 28 25 21 20 16 10 0
7175 * +--+---+--+---+-------+-----+-------+-------+---------+
7176 * | |op0| |op1| 1 0 1 | op2 | | op3 | |
7177 * +--+---+--+---+-------+-----+-------+-------+---------+
7178 */
7179 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
7180 {
7181 int op0 = extract32(insn, 30, 1);
7182 int op1 = extract32(insn, 28, 1);
7183 int op2 = extract32(insn, 21, 4);
7184 int op3 = extract32(insn, 10, 6);
7185
7186 if (!op1) {
7187 if (op2 & 8) {
7188 if (op2 & 1) {
7189 /* Add/sub (extended register) */
7190 disas_add_sub_ext_reg(s, insn);
7191 } else {
7192 /* Add/sub (shifted register) */
7193 disas_add_sub_reg(s, insn);
7194 }
7195 } else {
7196 /* Logical (shifted register) */
7197 disas_logic_reg(s, insn);
7198 }
7199 return;
7200 }
7201
7202 switch (op2) {
7203 case 0x0:
7204 switch (op3) {
7205 case 0x00: /* Add/subtract (with carry) */
7206 disas_adc_sbc(s, insn);
7207 break;
7208
7209 case 0x01: /* Rotate right into flags */
7210 case 0x21:
7211 disas_rotate_right_into_flags(s, insn);
7212 break;
7213
7214 case 0x02: /* Evaluate into flags */
7215 case 0x12:
7216 case 0x22:
7217 case 0x32:
7218 disas_evaluate_into_flags(s, insn);
7219 break;
7220
7221 default:
7222 goto do_unallocated;
7223 }
7224 break;
7225
7226 case 0x2: /* Conditional compare */
7227 disas_cc(s, insn); /* both imm and reg forms */
7228 break;
7229
7230 case 0x4: /* Conditional select */
7231 disas_cond_select(s, insn);
7232 break;
7233
7234 case 0x6: /* Data-processing */
7235 if (op0) { /* (1 source) */
7236 disas_data_proc_1src(s, insn);
7237 } else { /* (2 source) */
7238 disas_data_proc_2src(s, insn);
7239 }
7240 break;
7241 case 0x8 ... 0xf: /* (3 source) */
7242 disas_data_proc_3src(s, insn);
7243 break;
7244
7245 default:
7246 do_unallocated:
7247 unallocated_encoding(s);
7248 break;
7249 }
7250 }
7251
7252 static void handle_fp_compare(DisasContext *s, int size,
7253 unsigned int rn, unsigned int rm,
7254 bool cmp_with_zero, bool signal_all_nans)
7255 {
7256 TCGv_i64 tcg_flags = tcg_temp_new_i64();
7257 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7258
7259 if (size == MO_64) {
7260 TCGv_i64 tcg_vn, tcg_vm;
7261
7262 tcg_vn = read_fp_dreg(s, rn);
7263 if (cmp_with_zero) {
7264 tcg_vm = tcg_constant_i64(0);
7265 } else {
7266 tcg_vm = read_fp_dreg(s, rm);
7267 }
7268 if (signal_all_nans) {
7269 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7270 } else {
7271 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7272 }
7273 } else {
7274 TCGv_i32 tcg_vn = tcg_temp_new_i32();
7275 TCGv_i32 tcg_vm = tcg_temp_new_i32();
7276
7277 read_vec_element_i32(s, tcg_vn, rn, 0, size);
7278 if (cmp_with_zero) {
7279 tcg_gen_movi_i32(tcg_vm, 0);
7280 } else {
7281 read_vec_element_i32(s, tcg_vm, rm, 0, size);
7282 }
7283
7284 switch (size) {
7285 case MO_32:
7286 if (signal_all_nans) {
7287 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7288 } else {
7289 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7290 }
7291 break;
7292 case MO_16:
7293 if (signal_all_nans) {
7294 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7295 } else {
7296 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
7297 }
7298 break;
7299 default:
7300 g_assert_not_reached();
7301 }
7302 }
7303
7304 gen_set_nzcv(tcg_flags);
7305 }
7306
7307 /* Floating point compare
7308 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
7309 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
7310 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
7311 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
7312 */
7313 static void disas_fp_compare(DisasContext *s, uint32_t insn)
7314 {
7315 unsigned int mos, type, rm, op, rn, opc, op2r;
7316 int size;
7317
7318 mos = extract32(insn, 29, 3);
7319 type = extract32(insn, 22, 2);
7320 rm = extract32(insn, 16, 5);
7321 op = extract32(insn, 14, 2);
7322 rn = extract32(insn, 5, 5);
7323 opc = extract32(insn, 3, 2);
7324 op2r = extract32(insn, 0, 3);
7325
7326 if (mos || op || op2r) {
7327 unallocated_encoding(s);
7328 return;
7329 }
7330
7331 switch (type) {
7332 case 0:
7333 size = MO_32;
7334 break;
7335 case 1:
7336 size = MO_64;
7337 break;
7338 case 3:
7339 size = MO_16;
7340 if (dc_isar_feature(aa64_fp16, s)) {
7341 break;
7342 }
7343 /* fallthru */
7344 default:
7345 unallocated_encoding(s);
7346 return;
7347 }
7348
7349 if (!fp_access_check(s)) {
7350 return;
7351 }
7352
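    /*
     * opc bit 0 selects a compare against zero and opc bit 1 selects the
     * signalling form FCMPE, which raises Invalid Operation for quiet
     * NaN operands as well as signalling ones.
     */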
7353 handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
7354 }
7355
7356 /* Floating point conditional compare
7357 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
7358 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
7359 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
7360 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
7361 */
7362 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
7363 {
7364 unsigned int mos, type, rm, cond, rn, op, nzcv;
7365 TCGLabel *label_continue = NULL;
7366 int size;
7367
7368 mos = extract32(insn, 29, 3);
7369 type = extract32(insn, 22, 2);
7370 rm = extract32(insn, 16, 5);
7371 cond = extract32(insn, 12, 4);
7372 rn = extract32(insn, 5, 5);
7373 op = extract32(insn, 4, 1);
7374 nzcv = extract32(insn, 0, 4);
7375
7376 if (mos) {
7377 unallocated_encoding(s);
7378 return;
7379 }
7380
7381 switch (type) {
7382 case 0:
7383 size = MO_32;
7384 break;
7385 case 1:
7386 size = MO_64;
7387 break;
7388 case 3:
7389 size = MO_16;
7390 if (dc_isar_feature(aa64_fp16, s)) {
7391 break;
7392 }
7393 /* fallthru */
7394 default:
7395 unallocated_encoding(s);
7396 return;
7397 }
7398
7399 if (!fp_access_check(s)) {
7400 return;
7401 }
7402
7403 if (cond < 0x0e) { /* not always */
7404 TCGLabel *label_match = gen_new_label();
7405 label_continue = gen_new_label();
7406 arm_gen_test_cc(cond, label_match);
7407 /* nomatch: */
7408 gen_set_nzcv(tcg_constant_i64(nzcv << 28));
7409 tcg_gen_br(label_continue);
7410 gen_set_label(label_match);
7411 }
7412
7413 handle_fp_compare(s, size, rn, rm, false, op);
7414
7415 if (cond < 0x0e) {
7416 gen_set_label(label_continue);
7417 }
7418 }
7419
7420 /* Floating-point data-processing (1 source) - half precision */
7421 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
7422 {
7423 TCGv_ptr fpst = NULL;
7424 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
7425 TCGv_i32 tcg_res = tcg_temp_new_i32();
7426
7427 switch (opcode) {
7428 case 0x0: /* FMOV */
7429 tcg_gen_mov_i32(tcg_res, tcg_op);
7430 break;
7431 case 0x1: /* FABS */
7432 gen_vfp_absh(tcg_res, tcg_op);
7433 break;
7434 case 0x2: /* FNEG */
7435 gen_vfp_negh(tcg_res, tcg_op);
7436 break;
7437 case 0x3: /* FSQRT */
7438 fpst = fpstatus_ptr(FPST_FPCR_F16);
7439 gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
7440 break;
7441 case 0x8: /* FRINTN */
7442 case 0x9: /* FRINTP */
7443 case 0xa: /* FRINTM */
7444 case 0xb: /* FRINTZ */
7445 case 0xc: /* FRINTA */
7446 {
7447 TCGv_i32 tcg_rmode;
7448
7449 fpst = fpstatus_ptr(FPST_FPCR_F16);
7450 tcg_rmode = gen_set_rmode(opcode & 7, fpst);
7451 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
7452 gen_restore_rmode(tcg_rmode, fpst);
7453 break;
7454 }
7455 case 0xe: /* FRINTX */
7456 fpst = fpstatus_ptr(FPST_FPCR_F16);
7457 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
7458 break;
7459 case 0xf: /* FRINTI */
7460 fpst = fpstatus_ptr(FPST_FPCR_F16);
7461 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
7462 break;
7463 default:
7464 g_assert_not_reached();
7465 }
7466
7467 write_fp_sreg(s, rd, tcg_res);
7468 }
7469
7470 /* Floating-point data-processing (1 source) - single precision */
7471 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
7472 {
7473 void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
7474 TCGv_i32 tcg_op, tcg_res;
7475 TCGv_ptr fpst;
7476 int rmode = -1;
7477
7478 tcg_op = read_fp_sreg(s, rn);
7479 tcg_res = tcg_temp_new_i32();
7480
7481 switch (opcode) {
7482 case 0x0: /* FMOV */
7483 tcg_gen_mov_i32(tcg_res, tcg_op);
7484 goto done;
7485 case 0x1: /* FABS */
7486 gen_vfp_abss(tcg_res, tcg_op);
7487 goto done;
7488 case 0x2: /* FNEG */
7489 gen_vfp_negs(tcg_res, tcg_op);
7490 goto done;
7491 case 0x3: /* FSQRT */
7492 gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
7493 goto done;
7494 case 0x6: /* BFCVT */
7495 gen_fpst = gen_helper_bfcvt;
7496 break;
7497 case 0x8: /* FRINTN */
7498 case 0x9: /* FRINTP */
7499 case 0xa: /* FRINTM */
7500 case 0xb: /* FRINTZ */
7501 case 0xc: /* FRINTA */
7502 rmode = opcode & 7;
7503 gen_fpst = gen_helper_rints;
7504 break;
7505 case 0xe: /* FRINTX */
7506 gen_fpst = gen_helper_rints_exact;
7507 break;
7508 case 0xf: /* FRINTI */
7509 gen_fpst = gen_helper_rints;
7510 break;
7511 case 0x10: /* FRINT32Z */
7512 rmode = FPROUNDING_ZERO;
7513 gen_fpst = gen_helper_frint32_s;
7514 break;
7515 case 0x11: /* FRINT32X */
7516 gen_fpst = gen_helper_frint32_s;
7517 break;
7518 case 0x12: /* FRINT64Z */
7519 rmode = FPROUNDING_ZERO;
7520 gen_fpst = gen_helper_frint64_s;
7521 break;
7522 case 0x13: /* FRINT64X */
7523 gen_fpst = gen_helper_frint64_s;
7524 break;
7525 default:
7526 g_assert_not_reached();
7527 }
7528
7529 fpst = fpstatus_ptr(FPST_FPCR);
7530 if (rmode >= 0) {
7531 TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
7532 gen_fpst(tcg_res, tcg_op, fpst);
7533 gen_restore_rmode(tcg_rmode, fpst);
7534 } else {
7535 gen_fpst(tcg_res, tcg_op, fpst);
7536 }
7537
7538 done:
7539 write_fp_sreg(s, rd, tcg_res);
7540 }
7541
7542 /* Floating-point data-processing (1 source) - double precision */
7543 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
7544 {
7545 void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
7546 TCGv_i64 tcg_op, tcg_res;
7547 TCGv_ptr fpst;
7548 int rmode = -1;
7549
7550 switch (opcode) {
7551 case 0x0: /* FMOV */
7552 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
7553 return;
7554 }
7555
7556 tcg_op = read_fp_dreg(s, rn);
7557 tcg_res = tcg_temp_new_i64();
7558
7559 switch (opcode) {
7560 case 0x1: /* FABS */
7561 gen_vfp_absd(tcg_res, tcg_op);
7562 goto done;
7563 case 0x2: /* FNEG */
7564 gen_vfp_negd(tcg_res, tcg_op);
7565 goto done;
7566 case 0x3: /* FSQRT */
7567 gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
7568 goto done;
7569 case 0x8: /* FRINTN */
7570 case 0x9: /* FRINTP */
7571 case 0xa: /* FRINTM */
7572 case 0xb: /* FRINTZ */
7573 case 0xc: /* FRINTA */
7574 rmode = opcode & 7;
7575 gen_fpst = gen_helper_rintd;
7576 break;
7577 case 0xe: /* FRINTX */
7578 gen_fpst = gen_helper_rintd_exact;
7579 break;
7580 case 0xf: /* FRINTI */
7581 gen_fpst = gen_helper_rintd;
7582 break;
7583 case 0x10: /* FRINT32Z */
7584 rmode = FPROUNDING_ZERO;
7585 gen_fpst = gen_helper_frint32_d;
7586 break;
7587 case 0x11: /* FRINT32X */
7588 gen_fpst = gen_helper_frint32_d;
7589 break;
7590 case 0x12: /* FRINT64Z */
7591 rmode = FPROUNDING_ZERO;
7592 gen_fpst = gen_helper_frint64_d;
7593 break;
7594 case 0x13: /* FRINT64X */
7595 gen_fpst = gen_helper_frint64_d;
7596 break;
7597 default:
7598 g_assert_not_reached();
7599 }
7600
7601 fpst = fpstatus_ptr(FPST_FPCR);
7602 if (rmode >= 0) {
7603 TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
7604 gen_fpst(tcg_res, tcg_op, fpst);
7605 gen_restore_rmode(tcg_rmode, fpst);
7606 } else {
7607 gen_fpst(tcg_res, tcg_op, fpst);
7608 }
7609
7610 done:
7611 write_fp_dreg(s, rd, tcg_res);
7612 }
7613
7614 static void handle_fp_fcvt(DisasContext *s, int opcode,
7615 int rd, int rn, int dtype, int ntype)
7616 {
7617 switch (ntype) {
7618 case 0x0:
7619 {
7620 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
7621 if (dtype == 1) {
7622 /* Single to double */
7623 TCGv_i64 tcg_rd = tcg_temp_new_i64();
7624 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
7625 write_fp_dreg(s, rd, tcg_rd);
7626 } else {
7627 /* Single to half */
7628 TCGv_i32 tcg_rd = tcg_temp_new_i32();
7629 TCGv_i32 ahp = get_ahp_flag();
7630 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
7631
7632 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
7633 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
7634 write_fp_sreg(s, rd, tcg_rd);
7635 }
7636 break;
7637 }
7638 case 0x1:
7639 {
7640 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
7641 TCGv_i32 tcg_rd = tcg_temp_new_i32();
7642 if (dtype == 0) {
7643 /* Double to single */
7644 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
7645 } else {
7646 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
7647 TCGv_i32 ahp = get_ahp_flag();
7648 /* Double to half */
7649 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
7650 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
7651 }
7652 write_fp_sreg(s, rd, tcg_rd);
7653 break;
7654 }
7655 case 0x3:
7656 {
7657 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
7658 TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
7659 TCGv_i32 tcg_ahp = get_ahp_flag();
7660 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
7661 if (dtype == 0) {
7662 /* Half to single */
7663 TCGv_i32 tcg_rd = tcg_temp_new_i32();
7664 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
7665 write_fp_sreg(s, rd, tcg_rd);
7666 } else {
7667 /* Half to double */
7668 TCGv_i64 tcg_rd = tcg_temp_new_i64();
7669 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
7670 write_fp_dreg(s, rd, tcg_rd);
7671 }
7672 break;
7673 }
7674 default:
7675 g_assert_not_reached();
7676 }
7677 }
7678
7679 /* Floating point data-processing (1 source)
7680 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
7681 * +---+---+---+-----------+------+---+--------+-----------+------+------+
7682 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
7683 * +---+---+---+-----------+------+---+--------+-----------+------+------+
7684 */
7685 static void disas_fp_1src(DisasContext *s, uint32_t insn)
7686 {
7687 int mos = extract32(insn, 29, 3);
7688 int type = extract32(insn, 22, 2);
7689 int opcode = extract32(insn, 15, 6);
7690 int rn = extract32(insn, 5, 5);
7691 int rd = extract32(insn, 0, 5);
7692
7693 if (mos) {
7694 goto do_unallocated;
7695 }
7696
7697 switch (opcode) {
7698 case 0x4: case 0x5: case 0x7:
7699 {
7700 /* FCVT between half, single and double precision */
7701 int dtype = extract32(opcode, 0, 2);
7702 if (type == 2 || dtype == type) {
7703 goto do_unallocated;
7704 }
7705 if (!fp_access_check(s)) {
7706 return;
7707 }
7708
7709 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
7710 break;
7711 }
7712
7713 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
7714 if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
7715 goto do_unallocated;
7716 }
7717 /* fall through */
7718 case 0x0 ... 0x3:
7719 case 0x8 ... 0xc:
7720 case 0xe ... 0xf:
7721 /* 32-to-32 and 64-to-64 ops */
7722 switch (type) {
7723 case 0:
7724 if (!fp_access_check(s)) {
7725 return;
7726 }
7727 handle_fp_1src_single(s, opcode, rd, rn);
7728 break;
7729 case 1:
7730 if (!fp_access_check(s)) {
7731 return;
7732 }
7733 handle_fp_1src_double(s, opcode, rd, rn);
7734 break;
7735 case 3:
7736 if (!dc_isar_feature(aa64_fp16, s)) {
7737 goto do_unallocated;
7738 }
7739
7740 if (!fp_access_check(s)) {
7741 return;
7742 }
7743 handle_fp_1src_half(s, opcode, rd, rn);
7744 break;
7745 default:
7746 goto do_unallocated;
7747 }
7748 break;
7749
7750 case 0x6:
7751 switch (type) {
7752 case 1: /* BFCVT */
7753 if (!dc_isar_feature(aa64_bf16, s)) {
7754 goto do_unallocated;
7755 }
7756 if (!fp_access_check(s)) {
7757 return;
7758 }
7759 handle_fp_1src_single(s, opcode, rd, rn);
7760 break;
7761 default:
7762 goto do_unallocated;
7763 }
7764 break;
7765
7766 default:
7767 do_unallocated:
7768 unallocated_encoding(s);
7769 break;
7770 }
7771 }
7772
7773 /* Floating point immediate
7774 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
7775 * +---+---+---+-----------+------+---+------------+-------+------+------+
7776 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
7777 * +---+---+---+-----------+------+---+------------+-------+------+------+
7778 */
7779 static void disas_fp_imm(DisasContext *s, uint32_t insn)
7780 {
7781 int rd = extract32(insn, 0, 5);
7782 int imm5 = extract32(insn, 5, 5);
7783 int imm8 = extract32(insn, 13, 8);
7784 int type = extract32(insn, 22, 2);
7785 int mos = extract32(insn, 29, 3);
7786 uint64_t imm;
7787 MemOp sz;
7788
7789 if (mos || imm5) {
7790 unallocated_encoding(s);
7791 return;
7792 }
7793
7794 switch (type) {
7795 case 0:
7796 sz = MO_32;
7797 break;
7798 case 1:
7799 sz = MO_64;
7800 break;
7801 case 3:
7802 sz = MO_16;
7803 if (dc_isar_feature(aa64_fp16, s)) {
7804 break;
7805 }
7806 /* fallthru */
7807 default:
7808 unallocated_encoding(s);
7809 return;
7810 }
7811
7812 if (!fp_access_check(s)) {
7813 return;
7814 }
7815
7816 imm = vfp_expand_imm(sz, imm8);
7817 write_fp_dreg(s, rd, tcg_constant_i64(imm));
7818 }
7819
7820 /* Handle floating point <=> fixed point conversions. Note that we can
7821  * also deal with fp <=> integer conversions as a special case (scale == 64).
7822  * OPTME: consider handling that special case directly, or at least skipping
7823  * the call to scalbn in the helpers for zero shifts.
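 *
 * As an illustrative example: for the fixed-point forms the scale field
 * encodes 64 - fracbits, so the tcg_shift value below is the fractional
 * bit count (scale == 56 means 8 fractional bits); the plain integer
 * forms pass scale == 64, giving a shift of 0.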
7824 */
7825 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
7826 bool itof, int rmode, int scale, int sf, int type)
7827 {
7828 bool is_signed = !(opcode & 1);
7829 TCGv_ptr tcg_fpstatus;
7830 TCGv_i32 tcg_shift, tcg_single;
7831 TCGv_i64 tcg_double;
7832
7833 tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
7834
7835 tcg_shift = tcg_constant_i32(64 - scale);
7836
7837 if (itof) {
7838 TCGv_i64 tcg_int = cpu_reg(s, rn);
7839 if (!sf) {
7840 TCGv_i64 tcg_extend = tcg_temp_new_i64();
7841
7842 if (is_signed) {
7843 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
7844 } else {
7845 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
7846 }
7847
7848 tcg_int = tcg_extend;
7849 }
7850
7851 switch (type) {
7852 case 1: /* float64 */
7853 tcg_double = tcg_temp_new_i64();
7854 if (is_signed) {
7855 gen_helper_vfp_sqtod(tcg_double, tcg_int,
7856 tcg_shift, tcg_fpstatus);
7857 } else {
7858 gen_helper_vfp_uqtod(tcg_double, tcg_int,
7859 tcg_shift, tcg_fpstatus);
7860 }
7861 write_fp_dreg(s, rd, tcg_double);
7862 break;
7863
7864 case 0: /* float32 */
7865 tcg_single = tcg_temp_new_i32();
7866 if (is_signed) {
7867 gen_helper_vfp_sqtos(tcg_single, tcg_int,
7868 tcg_shift, tcg_fpstatus);
7869 } else {
7870 gen_helper_vfp_uqtos(tcg_single, tcg_int,
7871 tcg_shift, tcg_fpstatus);
7872 }
7873 write_fp_sreg(s, rd, tcg_single);
7874 break;
7875
7876 case 3: /* float16 */
7877 tcg_single = tcg_temp_new_i32();
7878 if (is_signed) {
7879 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
7880 tcg_shift, tcg_fpstatus);
7881 } else {
7882 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
7883 tcg_shift, tcg_fpstatus);
7884 }
7885 write_fp_sreg(s, rd, tcg_single);
7886 break;
7887
7888 default:
7889 g_assert_not_reached();
7890 }
7891 } else {
7892 TCGv_i64 tcg_int = cpu_reg(s, rd);
7893 TCGv_i32 tcg_rmode;
7894
7895 if (extract32(opcode, 2, 1)) {
7896 /* There are too many rounding modes to all fit into rmode,
7897 * so FCVTA[US] is a special case.
7898 */
7899 rmode = FPROUNDING_TIEAWAY;
7900 }
7901
7902 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
7903
7904 switch (type) {
7905 case 1: /* float64 */
7906 tcg_double = read_fp_dreg(s, rn);
7907 if (is_signed) {
7908 if (!sf) {
7909 gen_helper_vfp_tosld(tcg_int, tcg_double,
7910 tcg_shift, tcg_fpstatus);
7911 } else {
7912 gen_helper_vfp_tosqd(tcg_int, tcg_double,
7913 tcg_shift, tcg_fpstatus);
7914 }
7915 } else {
7916 if (!sf) {
7917 gen_helper_vfp_tould(tcg_int, tcg_double,
7918 tcg_shift, tcg_fpstatus);
7919 } else {
7920 gen_helper_vfp_touqd(tcg_int, tcg_double,
7921 tcg_shift, tcg_fpstatus);
7922 }
7923 }
7924 if (!sf) {
7925 tcg_gen_ext32u_i64(tcg_int, tcg_int);
7926 }
7927 break;
7928
7929 case 0: /* float32 */
7930 tcg_single = read_fp_sreg(s, rn);
7931 if (sf) {
7932 if (is_signed) {
7933 gen_helper_vfp_tosqs(tcg_int, tcg_single,
7934 tcg_shift, tcg_fpstatus);
7935 } else {
7936 gen_helper_vfp_touqs(tcg_int, tcg_single,
7937 tcg_shift, tcg_fpstatus);
7938 }
7939 } else {
7940 TCGv_i32 tcg_dest = tcg_temp_new_i32();
7941 if (is_signed) {
7942 gen_helper_vfp_tosls(tcg_dest, tcg_single,
7943 tcg_shift, tcg_fpstatus);
7944 } else {
7945 gen_helper_vfp_touls(tcg_dest, tcg_single,
7946 tcg_shift, tcg_fpstatus);
7947 }
7948 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
7949 }
7950 break;
7951
7952 case 3: /* float16 */
7953 tcg_single = read_fp_sreg(s, rn);
7954 if (sf) {
7955 if (is_signed) {
7956 gen_helper_vfp_tosqh(tcg_int, tcg_single,
7957 tcg_shift, tcg_fpstatus);
7958 } else {
7959 gen_helper_vfp_touqh(tcg_int, tcg_single,
7960 tcg_shift, tcg_fpstatus);
7961 }
7962 } else {
7963 TCGv_i32 tcg_dest = tcg_temp_new_i32();
7964 if (is_signed) {
7965 gen_helper_vfp_toslh(tcg_dest, tcg_single,
7966 tcg_shift, tcg_fpstatus);
7967 } else {
7968 gen_helper_vfp_toulh(tcg_dest, tcg_single,
7969 tcg_shift, tcg_fpstatus);
7970 }
7971 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
7972 }
7973 break;
7974
7975 default:
7976 g_assert_not_reached();
7977 }
7978
7979 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
7980 }
7981 }
7982
7983 /* Floating point <-> fixed point conversions
7984 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
7985 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7986 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
7987 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7988 */
7989 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
7990 {
7991 int rd = extract32(insn, 0, 5);
7992 int rn = extract32(insn, 5, 5);
7993 int scale = extract32(insn, 10, 6);
7994 int opcode = extract32(insn, 16, 3);
7995 int rmode = extract32(insn, 19, 2);
7996 int type = extract32(insn, 22, 2);
7997 bool sbit = extract32(insn, 29, 1);
7998 bool sf = extract32(insn, 31, 1);
7999 bool itof;
8000
8001 if (sbit || (!sf && scale < 32)) {
8002 unallocated_encoding(s);
8003 return;
8004 }
8005
8006 switch (type) {
8007 case 0: /* float32 */
8008 case 1: /* float64 */
8009 break;
8010 case 3: /* float16 */
8011 if (dc_isar_feature(aa64_fp16, s)) {
8012 break;
8013 }
8014 /* fallthru */
8015 default:
8016 unallocated_encoding(s);
8017 return;
8018 }
8019
8020 switch ((rmode << 3) | opcode) {
8021 case 0x2: /* SCVTF */
8022 case 0x3: /* UCVTF */
8023 itof = true;
8024 break;
8025 case 0x18: /* FCVTZS */
8026 case 0x19: /* FCVTZU */
8027 itof = false;
8028 break;
8029 default:
8030 unallocated_encoding(s);
8031 return;
8032 }
8033
8034 if (!fp_access_check(s)) {
8035 return;
8036 }
8037
8038 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
8039 }
8040
8041 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
8042 {
8043 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
8044 * without conversion.
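 *
 * type 0/1/3 move 32/64/16 bits to or from the low part of the fp
 * register; type 2 is the top-half form, e.g. FMOV Xd, Vn.D[1] or
 * FMOV Vd.D[1], Xn.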
8045 */
8046
8047 if (itof) {
8048 TCGv_i64 tcg_rn = cpu_reg(s, rn);
8049 TCGv_i64 tmp;
8050
8051 switch (type) {
8052 case 0:
8053 /* 32 bit */
8054 tmp = tcg_temp_new_i64();
8055 tcg_gen_ext32u_i64(tmp, tcg_rn);
8056 write_fp_dreg(s, rd, tmp);
8057 break;
8058 case 1:
8059 /* 64 bit */
8060 write_fp_dreg(s, rd, tcg_rn);
8061 break;
8062 case 2:
8063 /* 64 bit to top half. */
8064 tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
8065 clear_vec_high(s, true, rd);
8066 break;
8067 case 3:
8068 /* 16 bit */
8069 tmp = tcg_temp_new_i64();
8070 tcg_gen_ext16u_i64(tmp, tcg_rn);
8071 write_fp_dreg(s, rd, tmp);
8072 break;
8073 default:
8074 g_assert_not_reached();
8075 }
8076 } else {
8077 TCGv_i64 tcg_rd = cpu_reg(s, rd);
8078
8079 switch (type) {
8080 case 0:
8081 /* 32 bit */
8082 tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
8083 break;
8084 case 1:
8085 /* 64 bit */
8086 tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
8087 break;
8088 case 2:
8089 /* 64 bits from top half */
8090 tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
8091 break;
8092 case 3:
8093 /* 16 bit */
8094 tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
8095 break;
8096 default:
8097 g_assert_not_reached();
8098 }
8099 }
8100 }
8101
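/*
 * FJCVTZS: convert double to 32-bit integer with round-towards-zero and
 * the JavaScript wrap-around semantics. The helper returns the converted
 * result in the low 32 bits and the value for cpu_ZF in the high 32 bits
 * (Z is set when the conversion was exact); N, C and V are cleared.
 */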
8102 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
8103 {
8104 TCGv_i64 t = read_fp_dreg(s, rn);
8105 TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
8106
8107 gen_helper_fjcvtzs(t, t, fpstatus);
8108
8109 tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
8110 tcg_gen_extrh_i64_i32(cpu_ZF, t);
8111 tcg_gen_movi_i32(cpu_CF, 0);
8112 tcg_gen_movi_i32(cpu_NF, 0);
8113 tcg_gen_movi_i32(cpu_VF, 0);
8114 }
8115
8116 /* Floating point <-> integer conversions
8117 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
8118 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
8119 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
8120 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
8121 */
8122 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
8123 {
8124 int rd = extract32(insn, 0, 5);
8125 int rn = extract32(insn, 5, 5);
8126 int opcode = extract32(insn, 16, 3);
8127 int rmode = extract32(insn, 19, 2);
8128 int type = extract32(insn, 22, 2);
8129 bool sbit = extract32(insn, 29, 1);
8130 bool sf = extract32(insn, 31, 1);
8131 bool itof = false;
8132
8133 if (sbit) {
8134 goto do_unallocated;
8135 }
8136
8137 switch (opcode) {
8138 case 2: /* SCVTF */
8139 case 3: /* UCVTF */
8140 itof = true;
8141 /* fallthru */
8142 case 4: /* FCVTAS */
8143 case 5: /* FCVTAU */
8144 if (rmode != 0) {
8145 goto do_unallocated;
8146 }
8147 /* fallthru */
8148 case 0: /* FCVT[NPMZ]S */
8149 case 1: /* FCVT[NPMZ]U */
8150 switch (type) {
8151 case 0: /* float32 */
8152 case 1: /* float64 */
8153 break;
8154 case 3: /* float16 */
8155 if (!dc_isar_feature(aa64_fp16, s)) {
8156 goto do_unallocated;
8157 }
8158 break;
8159 default:
8160 goto do_unallocated;
8161 }
8162 if (!fp_access_check(s)) {
8163 return;
8164 }
8165 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
8166 break;
8167
8168 default:
8169 switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
8170 case 0b01100110: /* FMOV half <-> 32-bit int */
8171 case 0b01100111:
8172 case 0b11100110: /* FMOV half <-> 64-bit int */
8173 case 0b11100111:
8174 if (!dc_isar_feature(aa64_fp16, s)) {
8175 goto do_unallocated;
8176 }
8177 /* fallthru */
8178 case 0b00000110: /* FMOV 32-bit */
8179 case 0b00000111:
8180 case 0b10100110: /* FMOV 64-bit */
8181 case 0b10100111:
8182 case 0b11001110: /* FMOV top half of 128-bit */
8183 case 0b11001111:
8184 if (!fp_access_check(s)) {
8185 return;
8186 }
8187 itof = opcode & 1;
8188 handle_fmov(s, rd, rn, type, itof);
8189 break;
8190
8191 case 0b00111110: /* FJCVTZS */
8192 if (!dc_isar_feature(aa64_jscvt, s)) {
8193 goto do_unallocated;
8194 } else if (fp_access_check(s)) {
8195 handle_fjcvtzs(s, rd, rn);
8196 }
8197 break;
8198
8199 default:
8200 do_unallocated:
8201 unallocated_encoding(s);
8202 return;
8203 }
8204 break;
8205 }
8206 }
8207
8208 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
8209 * 31 30 29 28 25 24 0
8210 * +---+---+---+---------+-----------------------------+
8211 * | | 0 | | 1 1 1 1 | |
8212 * +---+---+---+---------+-----------------------------+
8213 */
8214 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
8215 {
8216 if (extract32(insn, 24, 1)) {
8217 unallocated_encoding(s); /* in decodetree */
8218 } else if (extract32(insn, 21, 1) == 0) {
8219 /* Floating point to fixed point conversions */
8220 disas_fp_fixed_conv(s, insn);
8221 } else {
8222 switch (extract32(insn, 10, 2)) {
8223 case 1:
8224 /* Floating point conditional compare */
8225 disas_fp_ccomp(s, insn);
8226 break;
8227 case 2:
8228 /* Floating point data-processing (2 source) */
8229 unallocated_encoding(s); /* in decodetree */
8230 break;
8231 case 3:
8232 /* Floating point conditional select */
8233 unallocated_encoding(s); /* in decodetree */
8234 break;
8235 case 0:
8236 switch (ctz32(extract32(insn, 12, 4))) {
8237 case 0: /* [15:12] == xxx1 */
8238 /* Floating point immediate */
8239 disas_fp_imm(s, insn);
8240 break;
8241 case 1: /* [15:12] == xx10 */
8242 /* Floating point compare */
8243 disas_fp_compare(s, insn);
8244 break;
8245 case 2: /* [15:12] == x100 */
8246 /* Floating point data-processing (1 source) */
8247 disas_fp_1src(s, insn);
8248 break;
8249 case 3: /* [15:12] == 1000 */
8250 unallocated_encoding(s);
8251 break;
8252 default: /* [15:12] == 0000 */
8253 /* Floating point <-> integer conversions */
8254 disas_fp_int_conv(s, insn);
8255 break;
8256 }
8257 break;
8258 }
8259 }
8260 }
8261
8262 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
8263 int pos)
8264 {
8265 /* Extract 64 bits from the middle of two concatenated 64 bit
8266 * vector register slices left:right. The extracted bits start
8267 * at 'pos' bits into the right (least significant) side.
8268 * We return the result in tcg_right, and guarantee not to
8269 * trash tcg_left.
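 *
 * For example, with pos == 16 the result is
 * (tcg_right >> 16) | (tcg_left << 48).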
8270 */
8271 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
8272 assert(pos > 0 && pos < 64);
8273
8274 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
8275 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
8276 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
8277 }
8278
8279 /* EXT
8280 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
8281 * +---+---+-------------+-----+---+------+---+------+---+------+------+
8282 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
8283 * +---+---+-------------+-----+---+------+---+------+---+------+------+
8284 */
8285 static void disas_simd_ext(DisasContext *s, uint32_t insn)
8286 {
8287 int is_q = extract32(insn, 30, 1);
8288 int op2 = extract32(insn, 22, 2);
8289 int imm4 = extract32(insn, 11, 4);
8290 int rm = extract32(insn, 16, 5);
8291 int rn = extract32(insn, 5, 5);
8292 int rd = extract32(insn, 0, 5);
8293 int pos = imm4 << 3;
8294 TCGv_i64 tcg_resl, tcg_resh;
8295
8296 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
8297 unallocated_encoding(s);
8298 return;
8299 }
8300
8301 if (!fp_access_check(s)) {
8302 return;
8303 }
8304
8305 tcg_resh = tcg_temp_new_i64();
8306 tcg_resl = tcg_temp_new_i64();
8307
8308 /* Vd gets bits starting at pos bits into Vm:Vn. This is
8309 * either extracting 128 bits from a 128:128 concatenation, or
8310 * extracting 64 bits from a 64:64 concatenation.
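     * E.g. a 128-bit EXT with imm4 == 4 has pos == 32, so Vd gets
     * bits [159:32] of the 256-bit value Vm:Vn.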
8311 */
8312 if (!is_q) {
8313 read_vec_element(s, tcg_resl, rn, 0, MO_64);
8314 if (pos != 0) {
8315 read_vec_element(s, tcg_resh, rm, 0, MO_64);
8316 do_ext64(s, tcg_resh, tcg_resl, pos);
8317 }
8318 } else {
8319 TCGv_i64 tcg_hh;
8320 typedef struct {
8321 int reg;
8322 int elt;
8323 } EltPosns;
8324 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
8325 EltPosns *elt = eltposns;
8326
8327 if (pos >= 64) {
8328 elt++;
8329 pos -= 64;
8330 }
8331
8332 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
8333 elt++;
8334 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
8335 elt++;
8336 if (pos != 0) {
8337 do_ext64(s, tcg_resh, tcg_resl, pos);
8338 tcg_hh = tcg_temp_new_i64();
8339 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
8340 do_ext64(s, tcg_hh, tcg_resh, pos);
8341 }
8342 }
8343
8344 write_vec_element(s, tcg_resl, rd, 0, MO_64);
8345 if (is_q) {
8346 write_vec_element(s, tcg_resh, rd, 1, MO_64);
8347 }
8348 clear_vec_high(s, is_q, rd);
8349 }
8350
8351 /* TBL/TBX
8352 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
8353 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
8354 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
8355 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
8356 */
8357 static void disas_simd_tb(DisasContext *s, uint32_t insn)
8358 {
8359 int op2 = extract32(insn, 22, 2);
8360 int is_q = extract32(insn, 30, 1);
8361 int rm = extract32(insn, 16, 5);
8362 int rn = extract32(insn, 5, 5);
8363 int rd = extract32(insn, 0, 5);
8364 int is_tbx = extract32(insn, 12, 1);
8365 int len = (extract32(insn, 13, 2) + 1) * 16;
8366
8367 if (op2 != 0) {
8368 unallocated_encoding(s);
8369 return;
8370 }
8371
8372 if (!fp_access_check(s)) {
8373 return;
8374 }
8375
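    /* The simd_data argument packs the table length in bytes (len), the
     * TBX flag, and the index of the first table register (rn).
     */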
8376 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
8377 vec_full_reg_offset(s, rm), tcg_env,
8378 is_q ? 16 : 8, vec_full_reg_size(s),
8379 (len << 6) | (is_tbx << 5) | rn,
8380 gen_helper_simd_tblx);
8381 }
8382
8383 /* ZIP/UZP/TRN
8384 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
8385 * +---+---+-------------+------+---+------+---+------------------+------+
8386 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
8387 * +---+---+-------------+------+---+------+---+------------------+------+
8388 */
8389 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
8390 {
8391 int rd = extract32(insn, 0, 5);
8392 int rn = extract32(insn, 5, 5);
8393 int rm = extract32(insn, 16, 5);
8394 int size = extract32(insn, 22, 2);
8395 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
8396 * bit 2 indicates 1 vs 2 variant of the insn.
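     * E.g. ZIP1 (opcode 3, part 0) interleaves the low halves of the
     * two sources: Vd[2*i] = Vn[i] and Vd[2*i+1] = Vm[i].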
8397 */
8398 int opcode = extract32(insn, 12, 2);
8399 bool part = extract32(insn, 14, 1);
8400 bool is_q = extract32(insn, 30, 1);
8401 int esize = 8 << size;
8402 int i;
8403 int datasize = is_q ? 128 : 64;
8404 int elements = datasize / esize;
8405 TCGv_i64 tcg_res[2], tcg_ele;
8406
8407 if (opcode == 0 || (size == 3 && !is_q)) {
8408 unallocated_encoding(s);
8409 return;
8410 }
8411
8412 if (!fp_access_check(s)) {
8413 return;
8414 }
8415
8416 tcg_res[0] = tcg_temp_new_i64();
8417 tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
8418 tcg_ele = tcg_temp_new_i64();
8419
8420 for (i = 0; i < elements; i++) {
8421 int o, w;
8422
8423 switch (opcode) {
8424 case 1: /* UZP1/2 */
8425 {
8426 int midpoint = elements / 2;
8427 if (i < midpoint) {
8428 read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
8429 } else {
8430 read_vec_element(s, tcg_ele, rm,
8431 2 * (i - midpoint) + part, size);
8432 }
8433 break;
8434 }
8435 case 2: /* TRN1/2 */
8436 if (i & 1) {
8437 read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
8438 } else {
8439 read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
8440 }
8441 break;
8442 case 3: /* ZIP1/2 */
8443 {
8444 int base = part * elements / 2;
8445 if (i & 1) {
8446 read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
8447 } else {
8448 read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
8449 }
8450 break;
8451 }
8452 default:
8453 g_assert_not_reached();
8454 }
8455
8456 w = (i * esize) / 64;
8457 o = (i * esize) % 64;
8458 if (o == 0) {
8459 tcg_gen_mov_i64(tcg_res[w], tcg_ele);
8460 } else {
8461 tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
8462 tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
8463 }
8464 }
8465
8466 for (i = 0; i <= is_q; ++i) {
8467 write_vec_element(s, tcg_res[i], rd, i, MO_64);
8468 }
8469 clear_vec_high(s, is_q, rd);
8470 }
8471
8472 /*
8473 * do_reduction_op helper
8474 *
8475 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
8476 * important for correct NaN propagation that we do these
8477 * operations in exactly the order specified by the pseudocode.
8478 *
8479  * This is a recursive function; TCG temps should be freed by the
8480  * calling function once it is done with the values.
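 *
 * For example, an 8-element reduction starts with vmap == 0xff and
 * splits it into vmap_lo == 0x0f and vmap_hi == 0xf0, recursing until
 * each map has exactly one element bit set.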
8481 */
8482 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
8483 int esize, int size, int vmap, TCGv_ptr fpst)
8484 {
8485 if (esize == size) {
8486 int element;
8487 MemOp msize = esize == 16 ? MO_16 : MO_32;
8488 TCGv_i32 tcg_elem;
8489
8490 /* We should have one register left here */
8491 assert(ctpop8(vmap) == 1);
8492 element = ctz32(vmap);
8493 assert(element < 8);
8494
8495 tcg_elem = tcg_temp_new_i32();
8496 read_vec_element_i32(s, tcg_elem, rn, element, msize);
8497 return tcg_elem;
8498 } else {
8499 int bits = size / 2;
8500 int shift = ctpop8(vmap) / 2;
8501 int vmap_lo = (vmap >> shift) & vmap;
8502 int vmap_hi = (vmap & ~vmap_lo);
8503 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
8504
8505 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
8506 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
8507 tcg_res = tcg_temp_new_i32();
8508
8509 switch (fpopcode) {
8510 case 0x0c: /* fmaxnmv half-precision */
8511 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
8512 break;
8513 case 0x0f: /* fmaxv half-precision */
8514 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
8515 break;
8516 case 0x1c: /* fminnmv half-precision */
8517 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
8518 break;
8519 case 0x1f: /* fminv half-precision */
8520 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
8521 break;
8522 case 0x2c: /* fmaxnmv */
8523 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
8524 break;
8525 case 0x2f: /* fmaxv */
8526 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
8527 break;
8528 case 0x3c: /* fminnmv */
8529 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
8530 break;
8531 case 0x3f: /* fminv */
8532 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
8533 break;
8534 default:
8535 g_assert_not_reached();
8536 }
8537 return tcg_res;
8538 }
8539 }
8540
8541 /* AdvSIMD across lanes
8542 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
8543 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
8544 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
8545 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
8546 */
8547 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
8548 {
8549 int rd = extract32(insn, 0, 5);
8550 int rn = extract32(insn, 5, 5);
8551 int size = extract32(insn, 22, 2);
8552 int opcode = extract32(insn, 12, 5);
8553 bool is_q = extract32(insn, 30, 1);
8554 bool is_u = extract32(insn, 29, 1);
8555 bool is_fp = false;
8556 bool is_min = false;
8557 int esize;
8558 int elements;
8559 int i;
8560 TCGv_i64 tcg_res, tcg_elt;
8561
8562 switch (opcode) {
8563 case 0x1b: /* ADDV */
8564 if (is_u) {
8565 unallocated_encoding(s);
8566 return;
8567 }
8568 /* fall through */
8569 case 0x3: /* SADDLV, UADDLV */
8570 case 0xa: /* SMAXV, UMAXV */
8571 case 0x1a: /* SMINV, UMINV */
8572 if (size == 3 || (size == 2 && !is_q)) {
8573 unallocated_encoding(s);
8574 return;
8575 }
8576 break;
8577 case 0xc: /* FMAXNMV, FMINNMV */
8578 case 0xf: /* FMAXV, FMINV */
8579 /* Bit 1 of size field encodes min vs max and the actual size
8580  * depends on the encoding of the U bit. If U is not set (and FP16
8581  * is enabled) then we use half-precision float instead of single
8582 * precision.
8583 */
8584 is_min = extract32(size, 1, 1);
8585 is_fp = true;
8586 if (!is_u && dc_isar_feature(aa64_fp16, s)) {
8587 size = 1;
8588 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
8589 unallocated_encoding(s);
8590 return;
8591 } else {
8592 size = 2;
8593 }
8594 break;
8595 default:
8596 unallocated_encoding(s);
8597 return;
8598 }
8599
8600 if (!fp_access_check(s)) {
8601 return;
8602 }
8603
8604 esize = 8 << size;
8605 elements = (is_q ? 128 : 64) / esize;
8606
8607 tcg_res = tcg_temp_new_i64();
8608 tcg_elt = tcg_temp_new_i64();
8609
8610 /* These instructions operate across all lanes of a vector
8611 * to produce a single result. We can guarantee that a 64
8612 * bit intermediate is sufficient:
8613 * + for [US]ADDLV the maximum element size is 32 bits, and
8614 * the result type is 64 bits
8615 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
8616 * same as the element size, which is 32 bits at most
8617 * For the integer operations we can choose to work at 64
8618 * or 32 bits and truncate at the end; for simplicity
8619 * we use 64 bits always. The floating point
8620 * ops do require 32 bit intermediates, though.
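     * E.g. UADDLV H0, V1.16B sums sixteen byte elements into the 64-bit
     * intermediate and then writes back only the low 16 bits (2 * esize).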
8621 */
8622 if (!is_fp) {
8623 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
8624
8625 for (i = 1; i < elements; i++) {
8626 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
8627
8628 switch (opcode) {
8629 case 0x03: /* SADDLV / UADDLV */
8630 case 0x1b: /* ADDV */
8631 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
8632 break;
8633 case 0x0a: /* SMAXV / UMAXV */
8634 if (is_u) {
8635 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
8636 } else {
8637 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
8638 }
8639 break;
8640 case 0x1a: /* SMINV / UMINV */
8641 if (is_u) {
8642 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
8643 } else {
8644 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
8645 }
8646 break;
8647 default:
8648 g_assert_not_reached();
8649 }
8650
8651 }
8652 } else {
8653 /* Floating point vector reduction ops which work across 32
8654 * bit (single) or 16 bit (half-precision) intermediates.
8655 * Note that correct NaN propagation requires that we do these
8656 * operations in exactly the order specified by the pseudocode.
8657 */
8658 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8659 int fpopcode = opcode | is_min << 4 | is_u << 5;
8660 int vmap = (1 << elements) - 1;
8661 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
8662 (is_q ? 128 : 64), vmap, fpst);
8663 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
8664 }
8665
8666 /* Now truncate the result to the width required for the final output */
8667 if (opcode == 0x03) {
8668 /* SADDLV, UADDLV: result is 2*esize */
8669 size++;
8670 }
8671
8672 switch (size) {
8673 case 0:
8674 tcg_gen_ext8u_i64(tcg_res, tcg_res);
8675 break;
8676 case 1:
8677 tcg_gen_ext16u_i64(tcg_res, tcg_res);
8678 break;
8679 case 2:
8680 tcg_gen_ext32u_i64(tcg_res, tcg_res);
8681 break;
8682 case 3:
8683 break;
8684 default:
8685 g_assert_not_reached();
8686 }
8687
8688 write_fp_dreg(s, rd, tcg_res);
8689 }
8690
8691 /* AdvSIMD modified immediate
8692 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
8693 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
8694 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
8695 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
8696 *
8697 * There are a number of operations that can be carried out here:
8698 * MOVI - move (shifted) imm into register
8699 * MVNI - move inverted (shifted) imm into register
8700 * ORR - bitwise OR of (shifted) imm with register
8701 * BIC - bitwise clear of (shifted) imm with register
8702 * With ARMv8.2 we also have:
8703 * FMOV half-precision
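 *
 * E.g. cmode 0010 with abcdefgh == 0x55 expands to the 32-bit pattern
 * 0x00005500, replicated across every lane of the destination.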
8704 */
8705 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
8706 {
8707 int rd = extract32(insn, 0, 5);
8708 int cmode = extract32(insn, 12, 4);
8709 int o2 = extract32(insn, 11, 1);
8710 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
8711 bool is_neg = extract32(insn, 29, 1);
8712 bool is_q = extract32(insn, 30, 1);
8713 uint64_t imm = 0;
8714
8715 if (o2) {
8716 if (cmode != 0xf || is_neg) {
8717 unallocated_encoding(s);
8718 return;
8719 }
8720 /* FMOV (vector, immediate) - half-precision */
8721 if (!dc_isar_feature(aa64_fp16, s)) {
8722 unallocated_encoding(s);
8723 return;
8724 }
8725 imm = vfp_expand_imm(MO_16, abcdefgh);
8726 /* now duplicate across the lanes */
8727 imm = dup_const(MO_16, imm);
8728 } else {
8729 if (cmode == 0xf && is_neg && !is_q) {
8730 unallocated_encoding(s);
8731 return;
8732 }
8733 imm = asimd_imm_const(abcdefgh, cmode, is_neg);
8734 }
8735
8736 if (!fp_access_check(s)) {
8737 return;
8738 }
8739
8740 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
8741 /* MOVI or MVNI, with MVNI negation handled above. */
8742 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
8743 vec_full_reg_size(s), imm);
8744 } else {
8745 /* ORR or BIC, with BIC negation to AND handled above. */
8746 if (is_neg) {
8747 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
8748 } else {
8749 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
8750 }
8751 }
8752 }
8753
8754 /*
8755 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
8756 *
8757  * This handles the common right-shift logic and is used by both
8758  * the vector and scalar forms.
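 *
 * Rounding adds 1 << (shift - 1) before shifting: e.g. a signed value
 * of 7 rounded-shifted right by 2 becomes (7 + 2) >> 2 == 2.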
8759 */
8760 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
8761 TCGv_i64 tcg_rnd, bool accumulate,
8762 bool is_u, int size, int shift)
8763 {
8764 bool extended_result = false;
8765 bool round = tcg_rnd != NULL;
8766 int ext_lshift = 0;
8767 TCGv_i64 tcg_src_hi;
8768
8769 if (round && size == 3) {
8770 extended_result = true;
8771 ext_lshift = 64 - shift;
8772 tcg_src_hi = tcg_temp_new_i64();
8773 } else if (shift == 64) {
8774 if (!accumulate && is_u) {
8775 /* result is zero */
8776 tcg_gen_movi_i64(tcg_res, 0);
8777 return;
8778 }
8779 }
8780
8781 /* Deal with the rounding step */
8782 if (round) {
8783 if (extended_result) {
8784 TCGv_i64 tcg_zero = tcg_constant_i64(0);
8785 if (!is_u) {
8786 /* take care of sign extending tcg_src */
8787 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8788 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8789 tcg_src, tcg_src_hi,
8790 tcg_rnd, tcg_zero);
8791 } else {
8792 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8793 tcg_src, tcg_zero,
8794 tcg_rnd, tcg_zero);
8795 }
8796 } else {
8797 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8798 }
8799 }
8800
8801 /* Now do the shift right */
8802 if (round && extended_result) {
8803 /* extended case, >64 bit precision required */
8804 if (ext_lshift == 0) {
8805 /* special case, only high bits matter */
8806 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8807 } else {
8808 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8809 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8810 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8811 }
8812 } else {
8813 if (is_u) {
8814 if (shift == 64) {
8815 /* essentially shifting in 64 zeros */
8816 tcg_gen_movi_i64(tcg_src, 0);
8817 } else {
8818 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8819 }
8820 } else {
8821 if (shift == 64) {
8822 /* effectively extending the sign-bit */
8823 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8824 } else {
8825 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8826 }
8827 }
8828 }
8829
8830 if (accumulate) {
8831 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8832 } else {
8833 tcg_gen_mov_i64(tcg_res, tcg_src);
8834 }
8835 }
8836
8837 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
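/* Only the 64-bit form exists (immh bit 3 must be set); shift == 128 - immhb. */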
8838 static void handle_scalar_simd_shri(DisasContext *s,
8839 bool is_u, int immh, int immb,
8840 int opcode, int rn, int rd)
8841 {
8842 const int size = 3;
8843 int immhb = immh << 3 | immb;
8844 int shift = 2 * (8 << size) - immhb;
8845 bool accumulate = false;
8846 bool round = false;
8847 bool insert = false;
8848 TCGv_i64 tcg_rn;
8849 TCGv_i64 tcg_rd;
8850 TCGv_i64 tcg_round;
8851
8852 if (!extract32(immh, 3, 1)) {
8853 unallocated_encoding(s);
8854 return;
8855 }
8856
8857 if (!fp_access_check(s)) {
8858 return;
8859 }
8860
8861 switch (opcode) {
8862 case 0x02: /* SSRA / USRA (accumulate) */
8863 accumulate = true;
8864 break;
8865 case 0x04: /* SRSHR / URSHR (rounding) */
8866 round = true;
8867 break;
8868 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8869 accumulate = round = true;
8870 break;
8871 case 0x08: /* SRI */
8872 insert = true;
8873 break;
8874 }
8875
8876 if (round) {
8877 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8878 } else {
8879 tcg_round = NULL;
8880 }
8881
8882 tcg_rn = read_fp_dreg(s, rn);
8883 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8884
8885 if (insert) {
8886 /* A shift count equal to the element size is valid but does nothing;
8887  * special-case it to avoid a potential shift by 64.
8888 */
8889 int esize = 8 << size;
8890 if (shift != esize) {
8891 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8892 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8893 }
8894 } else {
8895 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8896 accumulate, is_u, size, shift);
8897 }
8898
8899 write_fp_dreg(s, rd, tcg_rd);
8900 }
8901
8902 /* SHL/SLI - Scalar shift left */
8903 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8904 int immh, int immb, int opcode,
8905 int rn, int rd)
8906 {
8907 int size = 32 - clz32(immh) - 1;
8908 int immhb = immh << 3 | immb;
8909 int shift = immhb - (8 << size);
8910 TCGv_i64 tcg_rn;
8911 TCGv_i64 tcg_rd;
8912
8913 if (!extract32(immh, 3, 1)) {
8914 unallocated_encoding(s);
8915 return;
8916 }
8917
8918 if (!fp_access_check(s)) {
8919 return;
8920 }
8921
8922 tcg_rn = read_fp_dreg(s, rn);
8923 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8924
8925 if (insert) {
8926 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8927 } else {
8928 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8929 }
8930
8931 write_fp_dreg(s, rd, tcg_rd);
8932 }
8933
8934 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8935 * (signed/unsigned) narrowing */
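/*
 * The shift count is 2 * esize - immhb; e.g. immh == 0001 selects byte
 * elements (esize == 8), giving shifts of 16 - immhb, i.e. 1 to 8.
 */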
8936 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8937 bool is_u_shift, bool is_u_narrow,
8938 int immh, int immb, int opcode,
8939 int rn, int rd)
8940 {
8941 int immhb = immh << 3 | immb;
8942 int size = 32 - clz32(immh) - 1;
8943 int esize = 8 << size;
8944 int shift = (2 * esize) - immhb;
8945 int elements = is_scalar ? 1 : (64 / esize);
8946 bool round = extract32(opcode, 0, 1);
8947 MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8948 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8949 TCGv_i32 tcg_rd_narrowed;
8950 TCGv_i64 tcg_final;
8951
8952 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8953 { gen_helper_neon_narrow_sat_s8,
8954 gen_helper_neon_unarrow_sat8 },
8955 { gen_helper_neon_narrow_sat_s16,
8956 gen_helper_neon_unarrow_sat16 },
8957 { gen_helper_neon_narrow_sat_s32,
8958 gen_helper_neon_unarrow_sat32 },
8959 { NULL, NULL },
8960 };
8961 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8962 gen_helper_neon_narrow_sat_u8,
8963 gen_helper_neon_narrow_sat_u16,
8964 gen_helper_neon_narrow_sat_u32,
8965 NULL
8966 };
8967 NeonGenNarrowEnvFn *narrowfn;
8968
8969 int i;
8970
8971 assert(size < 4);
8972
8973 if (extract32(immh, 3, 1)) {
8974 unallocated_encoding(s);
8975 return;
8976 }
8977
8978 if (!fp_access_check(s)) {
8979 return;
8980 }
8981
8982 if (is_u_shift) {
8983 narrowfn = unsigned_narrow_fns[size];
8984 } else {
8985 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8986 }
8987
8988 tcg_rn = tcg_temp_new_i64();
8989 tcg_rd = tcg_temp_new_i64();
8990 tcg_rd_narrowed = tcg_temp_new_i32();
8991 tcg_final = tcg_temp_new_i64();
8992
8993 if (round) {
8994 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8995 } else {
8996 tcg_round = NULL;
8997 }
8998
8999 for (i = 0; i < elements; i++) {
9000 read_vec_element(s, tcg_rn, rn, i, ldop);
9001 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
9002 false, is_u_shift, size+1, shift);
9003 narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
9004 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
9005 if (i == 0) {
9006 tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize);
9007 } else {
9008 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
9009 }
9010 }
9011
9012 if (!is_q) {
9013 write_vec_element(s, tcg_final, rd, 0, MO_64);
9014 } else {
9015 write_vec_element(s, tcg_final, rd, 1, MO_64);
9016 }
9017 clear_vec_high(s, is_q, rd);
9018 }
9019
9020 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
9021 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
9022 bool src_unsigned, bool dst_unsigned,
9023 int immh, int immb, int rn, int rd)
9024 {
9025 int immhb = immh << 3 | immb;
9026 int size = 32 - clz32(immh) - 1;
9027 int shift = immhb - (8 << size);
9028 int pass;
9029
9030 assert(immh != 0);
9031 assert(!(scalar && is_q));
9032
9033 if (!scalar) {
9034 if (!is_q && extract32(immh, 3, 1)) {
9035 unallocated_encoding(s);
9036 return;
9037 }
9038
9039 /* Since we use the variable-shift helpers we must
9040 * replicate the shift count into each element of
9041 * the tcg_shift value.
9042 */
9043 switch (size) {
9044 case 0:
9045 shift |= shift << 8;
9046 /* fall through */
9047 case 1:
9048 shift |= shift << 16;
9049 break;
9050 case 2:
9051 case 3:
9052 break;
9053 default:
9054 g_assert_not_reached();
9055 }
9056 }
9057
9058 if (!fp_access_check(s)) {
9059 return;
9060 }
9061
9062 if (size == 3) {
9063 TCGv_i64 tcg_shift = tcg_constant_i64(shift);
9064 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
9065 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
9066 { NULL, gen_helper_neon_qshl_u64 },
9067 };
9068 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
9069 int maxpass = is_q ? 2 : 1;
9070
9071 for (pass = 0; pass < maxpass; pass++) {
9072 TCGv_i64 tcg_op = tcg_temp_new_i64();
9073
9074 read_vec_element(s, tcg_op, rn, pass, MO_64);
9075 genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
9076 write_vec_element(s, tcg_op, rd, pass, MO_64);
9077 }
9078 clear_vec_high(s, is_q, rd);
9079 } else {
9080 TCGv_i32 tcg_shift = tcg_constant_i32(shift);
9081 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
9082 {
9083 { gen_helper_neon_qshl_s8,
9084 gen_helper_neon_qshl_s16,
9085 gen_helper_neon_qshl_s32 },
9086 { gen_helper_neon_qshlu_s8,
9087 gen_helper_neon_qshlu_s16,
9088 gen_helper_neon_qshlu_s32 }
9089 }, {
9090 { NULL, NULL, NULL },
9091 { gen_helper_neon_qshl_u8,
9092 gen_helper_neon_qshl_u16,
9093 gen_helper_neon_qshl_u32 }
9094 }
9095 };
9096 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
9097 MemOp memop = scalar ? size : MO_32;
9098 int maxpass = scalar ? 1 : is_q ? 4 : 2;
9099
9100 for (pass = 0; pass < maxpass; pass++) {
9101 TCGv_i32 tcg_op = tcg_temp_new_i32();
9102
9103 read_vec_element_i32(s, tcg_op, rn, pass, memop);
9104 genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
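            /*
             * For the scalar forms, keep only the low 8/16 bits of the
             * result before writing it out as a 32-bit value.
             */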
9105 if (scalar) {
9106 switch (size) {
9107 case 0:
9108 tcg_gen_ext8u_i32(tcg_op, tcg_op);
9109 break;
9110 case 1:
9111 tcg_gen_ext16u_i32(tcg_op, tcg_op);
9112 break;
9113 case 2:
9114 break;
9115 default:
9116 g_assert_not_reached();
9117 }
9118 write_fp_sreg(s, rd, tcg_op);
9119 } else {
9120 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
9121 }
9122 }
9123
9124 if (!scalar) {
9125 clear_vec_high(s, is_q, rd);
9126 }
9127 }
9128 }
9129
9130 /* Common vector code for handling integer to FP conversion */
9131 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
9132 int elements, int is_signed,
9133 int fracbits, int size)
9134 {
9135 TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9136 TCGv_i32 tcg_shift = NULL;
9137
9138 MemOp mop = size | (is_signed ? MO_SIGN : 0);
9139 int pass;
9140
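    /*
     * The 64-bit conversion helpers always take a fixed-point shift
     * argument, so tcg_shift is required even when fracbits is zero.
     */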
9141 if (fracbits || size == MO_64) {
9142 tcg_shift = tcg_constant_i32(fracbits);
9143 }
9144
9145 if (size == MO_64) {
9146 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
9147 TCGv_i64 tcg_double = tcg_temp_new_i64();
9148
9149 for (pass = 0; pass < elements; pass++) {
9150 read_vec_element(s, tcg_int64, rn, pass, mop);
9151
9152 if (is_signed) {
9153 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
9154 tcg_shift, tcg_fpst);
9155 } else {
9156 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
9157 tcg_shift, tcg_fpst);
9158 }
9159 if (elements == 1) {
9160 write_fp_dreg(s, rd, tcg_double);
9161 } else {
9162 write_vec_element(s, tcg_double, rd, pass, MO_64);
9163 }
9164 }
9165 } else {
9166 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
9167 TCGv_i32 tcg_float = tcg_temp_new_i32();
9168
9169 for (pass = 0; pass < elements; pass++) {
9170 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
9171
9172 switch (size) {
9173 case MO_32:
9174 if (fracbits) {
9175 if (is_signed) {
9176 gen_helper_vfp_sltos(tcg_float, tcg_int32,
9177 tcg_shift, tcg_fpst);
9178 } else {
9179 gen_helper_vfp_ultos(tcg_float, tcg_int32,
9180 tcg_shift, tcg_fpst);
9181 }
9182 } else {
9183 if (is_signed) {
9184 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
9185 } else {
9186 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
9187 }
9188 }
9189 break;
9190 case MO_16:
9191 if (fracbits) {
9192 if (is_signed) {
9193 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
9194 tcg_shift, tcg_fpst);
9195 } else {
9196 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
9197 tcg_shift, tcg_fpst);
9198 }
9199 } else {
9200 if (is_signed) {
9201 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
9202 } else {
9203 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
9204 }
9205 }
9206 break;
9207 default:
9208 g_assert_not_reached();
9209 }
9210
9211 if (elements == 1) {
9212 write_fp_sreg(s, rd, tcg_float);
9213 } else {
9214 write_vec_element_i32(s, tcg_float, rd, pass, size);
9215 }
9216 }
9217 }
9218
9219 clear_vec_high(s, elements << size == 16, rd);
9220 }
9221
9222 /* UCVTF/SCVTF - Integer to FP conversion */
9223 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
9224 bool is_q, bool is_u,
9225 int immh, int immb, int opcode,
9226 int rn, int rd)
9227 {
9228 int size, elements, fracbits;
9229 int immhb = immh << 3 | immb;
9230
9231 if (immh & 8) {
9232 size = MO_64;
9233 if (!is_scalar && !is_q) {
9234 unallocated_encoding(s);
9235 return;
9236 }
9237 } else if (immh & 4) {
9238 size = MO_32;
9239 } else if (immh & 2) {
9240 size = MO_16;
9241 if (!dc_isar_feature(aa64_fp16, s)) {
9242 unallocated_encoding(s);
9243 return;
9244 }
9245 } else {
9246 /* immh == 0 would be a failure of the decode logic */
9247 g_assert(immh == 1);
9248 unallocated_encoding(s);
9249 return;
9250 }
9251
9252 if (is_scalar) {
9253 elements = 1;
9254 } else {
9255 elements = (8 << is_q) >> size;
9256 }
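    /* immh:immb encodes the fixed-point shift: fracbits = 2 * esize - immh:immb. */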
9257 fracbits = (16 << size) - immhb;
9258
9259 if (!fp_access_check(s)) {
9260 return;
9261 }
9262
9263 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
9264 }
9265
9266 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
9267 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
9268 bool is_q, bool is_u,
9269 int immh, int immb, int rn, int rd)
9270 {
9271 int immhb = immh << 3 | immb;
9272 int pass, size, fracbits;
9273 TCGv_ptr tcg_fpstatus;
9274 TCGv_i32 tcg_rmode, tcg_shift;
9275
9276 if (immh & 0x8) {
9277 size = MO_64;
9278 if (!is_scalar && !is_q) {
9279 unallocated_encoding(s);
9280 return;
9281 }
9282 } else if (immh & 0x4) {
9283 size = MO_32;
9284 } else if (immh & 0x2) {
9285 size = MO_16;
9286 if (!dc_isar_feature(aa64_fp16, s)) {
9287 unallocated_encoding(s);
9288 return;
9289 }
9290 } else {
9291 /* Should have split out AdvSIMD modified immediate earlier. */
9292 assert(immh == 1);
9293 unallocated_encoding(s);
9294 return;
9295 }
9296
9297 if (!fp_access_check(s)) {
9298 return;
9299 }
9300
9301 assert(!(is_scalar && is_q));
9302
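    /* FCVTZS/FCVTZU always convert with round-towards-zero. */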
9303 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9304 tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
9305 fracbits = (16 << size) - immhb;
9306 tcg_shift = tcg_constant_i32(fracbits);
9307
9308 if (size == MO_64) {
9309 int maxpass = is_scalar ? 1 : 2;
9310
9311 for (pass = 0; pass < maxpass; pass++) {
9312 TCGv_i64 tcg_op = tcg_temp_new_i64();
9313
9314 read_vec_element(s, tcg_op, rn, pass, MO_64);
9315 if (is_u) {
9316 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9317 } else {
9318 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9319 }
9320 write_vec_element(s, tcg_op, rd, pass, MO_64);
9321 }
9322 clear_vec_high(s, is_q, rd);
9323 } else {
9324 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
9325 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
9326
9327 switch (size) {
9328 case MO_16:
9329 if (is_u) {
9330 fn = gen_helper_vfp_touhh;
9331 } else {
9332 fn = gen_helper_vfp_toshh;
9333 }
9334 break;
9335 case MO_32:
9336 if (is_u) {
9337 fn = gen_helper_vfp_touls;
9338 } else {
9339 fn = gen_helper_vfp_tosls;
9340 }
9341 break;
9342 default:
9343 g_assert_not_reached();
9344 }
9345
9346 for (pass = 0; pass < maxpass; pass++) {
9347 TCGv_i32 tcg_op = tcg_temp_new_i32();
9348
9349 read_vec_element_i32(s, tcg_op, rn, pass, size);
9350 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9351 if (is_scalar) {
9352 if (size == MO_16 && !is_u) {
9353 tcg_gen_ext16u_i32(tcg_op, tcg_op);
9354 }
9355 write_fp_sreg(s, rd, tcg_op);
9356 } else {
9357 write_vec_element_i32(s, tcg_op, rd, pass, size);
9358 }
9359 }
9360 if (!is_scalar) {
9361 clear_vec_high(s, is_q, rd);
9362 }
9363 }
9364
9365 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
9366 }
9367
9368 /* AdvSIMD scalar shift by immediate
9369 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
9370 * +-----+---+-------------+------+------+--------+---+------+------+
9371 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
9372 * +-----+---+-------------+------+------+--------+---+------+------+
9373 *
9374  * This is the scalar version, so it works on fixed size registers.
9375 */
9376 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
9377 {
9378 int rd = extract32(insn, 0, 5);
9379 int rn = extract32(insn, 5, 5);
9380 int opcode = extract32(insn, 11, 5);
9381 int immb = extract32(insn, 16, 3);
9382 int immh = extract32(insn, 19, 4);
9383 bool is_u = extract32(insn, 29, 1);
9384
9385 if (immh == 0) {
9386 unallocated_encoding(s);
9387 return;
9388 }
9389
9390 switch (opcode) {
9391 case 0x08: /* SRI */
9392 if (!is_u) {
9393 unallocated_encoding(s);
9394 return;
9395 }
9396 /* fall through */
9397 case 0x00: /* SSHR / USHR */
9398 case 0x02: /* SSRA / USRA */
9399 case 0x04: /* SRSHR / URSHR */
9400 case 0x06: /* SRSRA / URSRA */
9401 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
9402 break;
9403 case 0x0a: /* SHL / SLI */
9404 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
9405 break;
9406 case 0x1c: /* SCVTF, UCVTF */
9407 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
9408 opcode, rn, rd);
9409 break;
9410 case 0x10: /* SQSHRUN, SQSHRUN2 */
9411 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
9412 if (!is_u) {
9413 unallocated_encoding(s);
9414 return;
9415 }
9416 handle_vec_simd_sqshrn(s, true, false, false, true,
9417 immh, immb, opcode, rn, rd);
9418 break;
9419     case 0x12: /* SQSHRN, SQSHRN2, UQSHRN, UQSHRN2 */
9420 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
9421 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
9422 immh, immb, opcode, rn, rd);
9423 break;
9424 case 0xc: /* SQSHLU */
9425 if (!is_u) {
9426 unallocated_encoding(s);
9427 return;
9428 }
9429 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
9430 break;
9431 case 0xe: /* SQSHL, UQSHL */
9432 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
9433 break;
9434 case 0x1f: /* FCVTZS, FCVTZU */
9435 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
9436 break;
9437 default:
9438 unallocated_encoding(s);
9439 break;
9440 }
9441 }
9442
9443 /* AdvSIMD scalar three different
9444 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
9445 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9446 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
9447 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9448 */
9449 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
9450 {
9451 bool is_u = extract32(insn, 29, 1);
9452 int size = extract32(insn, 22, 2);
9453 int opcode = extract32(insn, 12, 4);
9454 int rm = extract32(insn, 16, 5);
9455 int rn = extract32(insn, 5, 5);
9456 int rd = extract32(insn, 0, 5);
9457
9458 if (is_u) {
9459 unallocated_encoding(s);
9460 return;
9461 }
9462
9463 switch (opcode) {
9464 case 0x9: /* SQDMLAL, SQDMLAL2 */
9465 case 0xb: /* SQDMLSL, SQDMLSL2 */
9466 case 0xd: /* SQDMULL, SQDMULL2 */
9467 if (size == 0 || size == 3) {
9468 unallocated_encoding(s);
9469 return;
9470 }
9471 break;
9472 default:
9473 unallocated_encoding(s);
9474 return;
9475 }
9476
9477 if (!fp_access_check(s)) {
9478 return;
9479 }
9480
9481 if (size == 2) {
9482 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9483 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9484 TCGv_i64 tcg_res = tcg_temp_new_i64();
9485
9486 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
9487 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
9488
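        /*
         * The doubling in SQDMULL is implemented as a saturating add of
         * the 64-bit product with itself.
         */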
9489 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
9490 gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res);
9491
9492 switch (opcode) {
9493 case 0xd: /* SQDMULL, SQDMULL2 */
9494 break;
9495 case 0xb: /* SQDMLSL, SQDMLSL2 */
9496 tcg_gen_neg_i64(tcg_res, tcg_res);
9497 /* fall through */
9498 case 0x9: /* SQDMLAL, SQDMLAL2 */
9499 read_vec_element(s, tcg_op1, rd, 0, MO_64);
9500 gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env,
9501 tcg_res, tcg_op1);
9502 break;
9503 default:
9504 g_assert_not_reached();
9505 }
9506
9507 write_fp_dreg(s, rd, tcg_res);
9508 } else {
9509 TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
9510 TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
9511 TCGv_i64 tcg_res = tcg_temp_new_i64();
9512
9513 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
9514 gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res);
9515
9516 switch (opcode) {
9517 case 0xd: /* SQDMULL, SQDMULL2 */
9518 break;
9519 case 0xb: /* SQDMLSL, SQDMLSL2 */
9520 gen_helper_neon_negl_u32(tcg_res, tcg_res);
9521 /* fall through */
9522 case 0x9: /* SQDMLAL, SQDMLAL2 */
9523 {
9524 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
9525 read_vec_element(s, tcg_op3, rd, 0, MO_32);
9526 gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env,
9527 tcg_res, tcg_op3);
9528 break;
9529 }
9530 default:
9531 g_assert_not_reached();
9532 }
9533
9534 tcg_gen_ext32u_i64(tcg_res, tcg_res);
9535 write_fp_dreg(s, rd, tcg_res);
9536 }
9537 }
9538
9539 /* AdvSIMD scalar three same extra
9540 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
9541 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9542 * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
9543 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9544 */
9545 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9546 uint32_t insn)
9547 {
9548 int rd = extract32(insn, 0, 5);
9549 int rn = extract32(insn, 5, 5);
9550 int opcode = extract32(insn, 11, 4);
9551 int rm = extract32(insn, 16, 5);
9552 int size = extract32(insn, 22, 2);
9553 bool u = extract32(insn, 29, 1);
9554 TCGv_i32 ele1, ele2, ele3;
9555 TCGv_i64 res;
9556 bool feature;
9557
9558 switch (u * 16 + opcode) {
9559 case 0x10: /* SQRDMLAH (vector) */
9560 case 0x11: /* SQRDMLSH (vector) */
9561 if (size != 1 && size != 2) {
9562 unallocated_encoding(s);
9563 return;
9564 }
9565 feature = dc_isar_feature(aa64_rdm, s);
9566 break;
9567 default:
9568 unallocated_encoding(s);
9569 return;
9570 }
9571 if (!feature) {
9572 unallocated_encoding(s);
9573 return;
9574 }
9575 if (!fp_access_check(s)) {
9576 return;
9577 }
9578
9579 /* Do a single operation on the lowest element in the vector.
9580 * We use the standard Neon helpers and rely on 0 OP 0 == 0
9581 * with no side effects for all these operations.
9582 * OPTME: special-purpose helpers would avoid doing some
9583 * unnecessary work in the helper for the 16 bit cases.
9584 */
9585 ele1 = tcg_temp_new_i32();
9586 ele2 = tcg_temp_new_i32();
9587 ele3 = tcg_temp_new_i32();
9588
9589 read_vec_element_i32(s, ele1, rn, 0, size);
9590 read_vec_element_i32(s, ele2, rm, 0, size);
9591 read_vec_element_i32(s, ele3, rd, 0, size);
9592
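    /* The helpers take the accumulator (the Rd element) as their last operand. */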
9593 switch (opcode) {
9594 case 0x0: /* SQRDMLAH */
9595 if (size == 1) {
9596 gen_helper_neon_qrdmlah_s16(ele3, tcg_env, ele1, ele2, ele3);
9597 } else {
9598 gen_helper_neon_qrdmlah_s32(ele3, tcg_env, ele1, ele2, ele3);
9599 }
9600 break;
9601 case 0x1: /* SQRDMLSH */
9602 if (size == 1) {
9603 gen_helper_neon_qrdmlsh_s16(ele3, tcg_env, ele1, ele2, ele3);
9604 } else {
9605 gen_helper_neon_qrdmlsh_s32(ele3, tcg_env, ele1, ele2, ele3);
9606 }
9607 break;
9608 default:
9609 g_assert_not_reached();
9610 }
9611
9612 res = tcg_temp_new_i64();
9613 tcg_gen_extu_i32_i64(res, ele3);
9614 write_fp_dreg(s, rd, res);
9615 }
9616
9617 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9618 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9619 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9620 {
9621 /* Handle 64->64 opcodes which are shared between the scalar and
9622 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9623 * is valid in either group and also the double-precision fp ops.
9624  * The caller need only provide tcg_rmode and tcg_fpstatus if the op
9625 * requires them.
9626 */
9627 TCGCond cond;
9628
9629 switch (opcode) {
9630 case 0x4: /* CLS, CLZ */
9631 if (u) {
9632 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9633 } else {
9634 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9635 }
9636 break;
9637 case 0x5: /* NOT */
9638 /* This opcode is shared with CNT and RBIT but we have earlier
9639 * enforced that size == 3 if and only if this is the NOT insn.
9640 */
9641 tcg_gen_not_i64(tcg_rd, tcg_rn);
9642 break;
9643 case 0x7: /* SQABS, SQNEG */
9644 if (u) {
9645 gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
9646 } else {
9647 gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
9648 }
9649 break;
9650 case 0xa: /* CMLT */
9651 cond = TCG_COND_LT;
9652 do_cmop:
9653 /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
9654 tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
9655 break;
9656 case 0x8: /* CMGT, CMGE */
9657 cond = u ? TCG_COND_GE : TCG_COND_GT;
9658 goto do_cmop;
9659 case 0x9: /* CMEQ, CMLE */
9660 cond = u ? TCG_COND_LE : TCG_COND_EQ;
9661 goto do_cmop;
9662 case 0xb: /* ABS, NEG */
9663 if (u) {
9664 tcg_gen_neg_i64(tcg_rd, tcg_rn);
9665 } else {
9666 tcg_gen_abs_i64(tcg_rd, tcg_rn);
9667 }
9668 break;
9669 case 0x2f: /* FABS */
9670 gen_vfp_absd(tcg_rd, tcg_rn);
9671 break;
9672 case 0x6f: /* FNEG */
9673 gen_vfp_negd(tcg_rd, tcg_rn);
9674 break;
9675 case 0x7f: /* FSQRT */
9676 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
9677 break;
9678 case 0x1a: /* FCVTNS */
9679 case 0x1b: /* FCVTMS */
9680 case 0x1c: /* FCVTAS */
9681 case 0x3a: /* FCVTPS */
9682 case 0x3b: /* FCVTZS */
9683 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9684 break;
9685 case 0x5a: /* FCVTNU */
9686 case 0x5b: /* FCVTMU */
9687 case 0x5c: /* FCVTAU */
9688 case 0x7a: /* FCVTPU */
9689 case 0x7b: /* FCVTZU */
9690 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9691 break;
9692 case 0x18: /* FRINTN */
9693 case 0x19: /* FRINTM */
9694 case 0x38: /* FRINTP */
9695 case 0x39: /* FRINTZ */
9696 case 0x58: /* FRINTA */
9697 case 0x79: /* FRINTI */
9698 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9699 break;
9700 case 0x59: /* FRINTX */
9701 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9702 break;
9703 case 0x1e: /* FRINT32Z */
9704 case 0x5e: /* FRINT32X */
9705 gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9706 break;
9707 case 0x1f: /* FRINT64Z */
9708 case 0x5f: /* FRINT64X */
9709 gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9710 break;
9711 default:
9712 g_assert_not_reached();
9713 }
9714 }
9715
9716 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9717 bool is_scalar, bool is_u, bool is_q,
9718 int size, int rn, int rd)
9719 {
9720 bool is_double = (size == MO_64);
9721 TCGv_ptr fpst;
9722
9723 if (!fp_access_check(s)) {
9724 return;
9725 }
9726
9727 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9728
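    /*
     * FCMLT (zero) and FCMLE (zero) have no dedicated helpers: they are
     * handled by swapping the operands of the FCMGT/FCMGE helpers.
     */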
9729 if (is_double) {
9730 TCGv_i64 tcg_op = tcg_temp_new_i64();
9731 TCGv_i64 tcg_zero = tcg_constant_i64(0);
9732 TCGv_i64 tcg_res = tcg_temp_new_i64();
9733 NeonGenTwoDoubleOpFn *genfn;
9734 bool swap = false;
9735 int pass;
9736
9737 switch (opcode) {
9738 case 0x2e: /* FCMLT (zero) */
9739 swap = true;
9740 /* fallthrough */
9741 case 0x2c: /* FCMGT (zero) */
9742 genfn = gen_helper_neon_cgt_f64;
9743 break;
9744 case 0x2d: /* FCMEQ (zero) */
9745 genfn = gen_helper_neon_ceq_f64;
9746 break;
9747 case 0x6d: /* FCMLE (zero) */
9748 swap = true;
9749 /* fall through */
9750 case 0x6c: /* FCMGE (zero) */
9751 genfn = gen_helper_neon_cge_f64;
9752 break;
9753 default:
9754 g_assert_not_reached();
9755 }
9756
9757 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9758 read_vec_element(s, tcg_op, rn, pass, MO_64);
9759 if (swap) {
9760 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9761 } else {
9762 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9763 }
9764 write_vec_element(s, tcg_res, rd, pass, MO_64);
9765 }
9766
9767 clear_vec_high(s, !is_scalar, rd);
9768 } else {
9769 TCGv_i32 tcg_op = tcg_temp_new_i32();
9770 TCGv_i32 tcg_zero = tcg_constant_i32(0);
9771 TCGv_i32 tcg_res = tcg_temp_new_i32();
9772 NeonGenTwoSingleOpFn *genfn;
9773 bool swap = false;
9774 int pass, maxpasses;
9775
9776 if (size == MO_16) {
9777 switch (opcode) {
9778 case 0x2e: /* FCMLT (zero) */
9779 swap = true;
9780 /* fall through */
9781 case 0x2c: /* FCMGT (zero) */
9782 genfn = gen_helper_advsimd_cgt_f16;
9783 break;
9784 case 0x2d: /* FCMEQ (zero) */
9785 genfn = gen_helper_advsimd_ceq_f16;
9786 break;
9787 case 0x6d: /* FCMLE (zero) */
9788 swap = true;
9789 /* fall through */
9790 case 0x6c: /* FCMGE (zero) */
9791 genfn = gen_helper_advsimd_cge_f16;
9792 break;
9793 default:
9794 g_assert_not_reached();
9795 }
9796 } else {
9797 switch (opcode) {
9798 case 0x2e: /* FCMLT (zero) */
9799 swap = true;
9800 /* fall through */
9801 case 0x2c: /* FCMGT (zero) */
9802 genfn = gen_helper_neon_cgt_f32;
9803 break;
9804 case 0x2d: /* FCMEQ (zero) */
9805 genfn = gen_helper_neon_ceq_f32;
9806 break;
9807 case 0x6d: /* FCMLE (zero) */
9808 swap = true;
9809 /* fall through */
9810 case 0x6c: /* FCMGE (zero) */
9811 genfn = gen_helper_neon_cge_f32;
9812 break;
9813 default:
9814 g_assert_not_reached();
9815 }
9816 }
9817
9818 if (is_scalar) {
9819 maxpasses = 1;
9820 } else {
9821 int vector_size = 8 << is_q;
9822 maxpasses = vector_size >> size;
9823 }
9824
9825 for (pass = 0; pass < maxpasses; pass++) {
9826 read_vec_element_i32(s, tcg_op, rn, pass, size);
9827 if (swap) {
9828 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9829 } else {
9830 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9831 }
9832 if (is_scalar) {
9833 write_fp_sreg(s, rd, tcg_res);
9834 } else {
9835 write_vec_element_i32(s, tcg_res, rd, pass, size);
9836 }
9837 }
9838
9839 if (!is_scalar) {
9840 clear_vec_high(s, is_q, rd);
9841 }
9842 }
9843 }
9844
9845 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
9846 bool is_scalar, bool is_u, bool is_q,
9847 int size, int rn, int rd)
9848 {
9849 bool is_double = (size == 3);
9850 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9851
9852 if (is_double) {
9853 TCGv_i64 tcg_op = tcg_temp_new_i64();
9854 TCGv_i64 tcg_res = tcg_temp_new_i64();
9855 int pass;
9856
9857 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9858 read_vec_element(s, tcg_op, rn, pass, MO_64);
9859 switch (opcode) {
9860 case 0x3d: /* FRECPE */
9861 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
9862 break;
9863 case 0x3f: /* FRECPX */
9864 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
9865 break;
9866 case 0x7d: /* FRSQRTE */
9867 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
9868 break;
9869 default:
9870 g_assert_not_reached();
9871 }
9872 write_vec_element(s, tcg_res, rd, pass, MO_64);
9873 }
9874 clear_vec_high(s, !is_scalar, rd);
9875 } else {
9876 TCGv_i32 tcg_op = tcg_temp_new_i32();
9877 TCGv_i32 tcg_res = tcg_temp_new_i32();
9878 int pass, maxpasses;
9879
9880 if (is_scalar) {
9881 maxpasses = 1;
9882 } else {
9883 maxpasses = is_q ? 4 : 2;
9884 }
9885
9886 for (pass = 0; pass < maxpasses; pass++) {
9887 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
9888
9889 switch (opcode) {
9890 case 0x3c: /* URECPE */
9891 gen_helper_recpe_u32(tcg_res, tcg_op);
9892 break;
9893 case 0x3d: /* FRECPE */
9894 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
9895 break;
9896 case 0x3f: /* FRECPX */
9897 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
9898 break;
9899 case 0x7d: /* FRSQRTE */
9900 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
9901 break;
9902 default:
9903 g_assert_not_reached();
9904 }
9905
9906 if (is_scalar) {
9907 write_fp_sreg(s, rd, tcg_res);
9908 } else {
9909 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9910 }
9911 }
9912 if (!is_scalar) {
9913 clear_vec_high(s, is_q, rd);
9914 }
9915 }
9916 }
9917
9918 static void handle_2misc_narrow(DisasContext *s, bool scalar,
9919 int opcode, bool u, bool is_q,
9920 int size, int rn, int rd)
9921 {
9922 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9923 * in the source becomes a size element in the destination).
9924 */
9925 int pass;
9926 TCGv_i32 tcg_res[2];
9927 int destelt = is_q ? 2 : 0;
9928 int passes = scalar ? 1 : 2;
9929
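    /*
     * The "2" (is_q) variants deposit the narrowed results into the upper
     * half of Vd, preserving the lower half; the base forms write the
     * lower half and zero the rest.
     */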
9930 if (scalar) {
9931 tcg_res[1] = tcg_constant_i32(0);
9932 }
9933
9934 for (pass = 0; pass < passes; pass++) {
9935 TCGv_i64 tcg_op = tcg_temp_new_i64();
9936 NeonGenNarrowFn *genfn = NULL;
9937 NeonGenNarrowEnvFn *genenvfn = NULL;
9938
9939 if (scalar) {
9940 read_vec_element(s, tcg_op, rn, pass, size + 1);
9941 } else {
9942 read_vec_element(s, tcg_op, rn, pass, MO_64);
9943 }
9944 tcg_res[pass] = tcg_temp_new_i32();
9945
9946 switch (opcode) {
9947 case 0x12: /* XTN, SQXTUN */
9948 {
9949 static NeonGenNarrowFn * const xtnfns[3] = {
9950 gen_helper_neon_narrow_u8,
9951 gen_helper_neon_narrow_u16,
9952 tcg_gen_extrl_i64_i32,
9953 };
9954 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9955 gen_helper_neon_unarrow_sat8,
9956 gen_helper_neon_unarrow_sat16,
9957 gen_helper_neon_unarrow_sat32,
9958 };
9959 if (u) {
9960 genenvfn = sqxtunfns[size];
9961 } else {
9962 genfn = xtnfns[size];
9963 }
9964 break;
9965 }
9966 case 0x14: /* SQXTN, UQXTN */
9967 {
9968 static NeonGenNarrowEnvFn * const fns[3][2] = {
9969 { gen_helper_neon_narrow_sat_s8,
9970 gen_helper_neon_narrow_sat_u8 },
9971 { gen_helper_neon_narrow_sat_s16,
9972 gen_helper_neon_narrow_sat_u16 },
9973 { gen_helper_neon_narrow_sat_s32,
9974 gen_helper_neon_narrow_sat_u32 },
9975 };
9976 genenvfn = fns[size][u];
9977 break;
9978 }
9979 case 0x16: /* FCVTN, FCVTN2 */
9980 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9981 if (size == 2) {
9982 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
9983 } else {
9984 TCGv_i32 tcg_lo = tcg_temp_new_i32();
9985 TCGv_i32 tcg_hi = tcg_temp_new_i32();
9986 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9987 TCGv_i32 ahp = get_ahp_flag();
9988
9989 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9990 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9991 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9992 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9993 }
9994 break;
9995 case 0x36: /* BFCVTN, BFCVTN2 */
9996 {
9997 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9998 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
9999 }
10000 break;
10001 case 0x56: /* FCVTXN, FCVTXN2 */
10002 /* 64 bit to 32 bit float conversion
10003 * with von Neumann rounding (round to odd)
10004 */
10005 assert(size == 2);
10006 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
10007 break;
10008 default:
10009 g_assert_not_reached();
10010 }
10011
10012 if (genfn) {
10013 genfn(tcg_res[pass], tcg_op);
10014 } else if (genenvfn) {
10015 genenvfn(tcg_res[pass], tcg_env, tcg_op);
10016 }
10017 }
10018
10019 for (pass = 0; pass < 2; pass++) {
10020 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
10021 }
10022 clear_vec_high(s, is_q, rd);
10023 }
10024
10025 /* AdvSIMD scalar two reg misc
10026 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
10027 * +-----+---+-----------+------+-----------+--------+-----+------+------+
10028 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
10029 * +-----+---+-----------+------+-----------+--------+-----+------+------+
10030 */
10031 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
10032 {
10033 int rd = extract32(insn, 0, 5);
10034 int rn = extract32(insn, 5, 5);
10035 int opcode = extract32(insn, 12, 5);
10036 int size = extract32(insn, 22, 2);
10037 bool u = extract32(insn, 29, 1);
10038 bool is_fcvt = false;
10039 int rmode;
10040 TCGv_i32 tcg_rmode;
10041 TCGv_ptr tcg_fpstatus;
10042
10043 switch (opcode) {
10044 case 0x7: /* SQABS / SQNEG */
10045 break;
10046 case 0xa: /* CMLT */
10047 if (u) {
10048 unallocated_encoding(s);
10049 return;
10050 }
10051 /* fall through */
10052 case 0x8: /* CMGT, CMGE */
10053 case 0x9: /* CMEQ, CMLE */
10054 case 0xb: /* ABS, NEG */
10055 if (size != 3) {
10056 unallocated_encoding(s);
10057 return;
10058 }
10059 break;
10060 case 0x12: /* SQXTUN */
10061 if (!u) {
10062 unallocated_encoding(s);
10063 return;
10064 }
10065 /* fall through */
10066 case 0x14: /* SQXTN, UQXTN */
10067 if (size == 3) {
10068 unallocated_encoding(s);
10069 return;
10070 }
10071 if (!fp_access_check(s)) {
10072 return;
10073 }
10074 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
10075 return;
10076 case 0xc ... 0xf:
10077 case 0x16 ... 0x1d:
10078 case 0x1f:
10079 /* Floating point: U, size[1] and opcode indicate operation;
10080 * size[0] indicates single or double precision.
10081 */
10082 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
10083 size = extract32(size, 0, 1) ? 3 : 2;
10084 switch (opcode) {
10085 case 0x2c: /* FCMGT (zero) */
10086 case 0x2d: /* FCMEQ (zero) */
10087 case 0x2e: /* FCMLT (zero) */
10088 case 0x6c: /* FCMGE (zero) */
10089 case 0x6d: /* FCMLE (zero) */
10090 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
10091 return;
10092 case 0x1d: /* SCVTF */
10093 case 0x5d: /* UCVTF */
10094 {
10095 bool is_signed = (opcode == 0x1d);
10096 if (!fp_access_check(s)) {
10097 return;
10098 }
10099 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
10100 return;
10101 }
10102 case 0x3d: /* FRECPE */
10103 case 0x3f: /* FRECPX */
10104 case 0x7d: /* FRSQRTE */
10105 if (!fp_access_check(s)) {
10106 return;
10107 }
10108 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
10109 return;
10110 case 0x1a: /* FCVTNS */
10111 case 0x1b: /* FCVTMS */
10112 case 0x3a: /* FCVTPS */
10113 case 0x3b: /* FCVTZS */
10114 case 0x5a: /* FCVTNU */
10115 case 0x5b: /* FCVTMU */
10116 case 0x7a: /* FCVTPU */
10117 case 0x7b: /* FCVTZU */
10118 is_fcvt = true;
10119 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10120 break;
10121 case 0x1c: /* FCVTAS */
10122 case 0x5c: /* FCVTAU */
10123 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10124 is_fcvt = true;
10125 rmode = FPROUNDING_TIEAWAY;
10126 break;
10127 case 0x56: /* FCVTXN, FCVTXN2 */
10128 if (size == 2) {
10129 unallocated_encoding(s);
10130 return;
10131 }
10132 if (!fp_access_check(s)) {
10133 return;
10134 }
10135 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
10136 return;
10137 default:
10138 unallocated_encoding(s);
10139 return;
10140 }
10141 break;
10142 default:
10143 case 0x3: /* USQADD / SUQADD */
10144 unallocated_encoding(s);
10145 return;
10146 }
10147
10148 if (!fp_access_check(s)) {
10149 return;
10150 }
10151
10152 if (is_fcvt) {
10153 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
10154 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
10155 } else {
10156 tcg_fpstatus = NULL;
10157 tcg_rmode = NULL;
10158 }
10159
10160 if (size == 3) {
10161 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
10162 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10163
10164 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
10165 write_fp_dreg(s, rd, tcg_rd);
10166 } else {
10167 TCGv_i32 tcg_rn = tcg_temp_new_i32();
10168 TCGv_i32 tcg_rd = tcg_temp_new_i32();
10169
10170 read_vec_element_i32(s, tcg_rn, rn, 0, size);
10171
10172 switch (opcode) {
10173 case 0x7: /* SQABS, SQNEG */
10174 {
10175 NeonGenOneOpEnvFn *genfn;
10176 static NeonGenOneOpEnvFn * const fns[3][2] = {
10177 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10178 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10179 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
10180 };
10181 genfn = fns[size][u];
10182 genfn(tcg_rd, tcg_env, tcg_rn);
10183 break;
10184 }
10185 case 0x1a: /* FCVTNS */
10186 case 0x1b: /* FCVTMS */
10187 case 0x1c: /* FCVTAS */
10188 case 0x3a: /* FCVTPS */
10189 case 0x3b: /* FCVTZS */
10190 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10191 tcg_fpstatus);
10192 break;
10193 case 0x5a: /* FCVTNU */
10194 case 0x5b: /* FCVTMU */
10195 case 0x5c: /* FCVTAU */
10196 case 0x7a: /* FCVTPU */
10197 case 0x7b: /* FCVTZU */
10198 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10199 tcg_fpstatus);
10200 break;
10201 default:
10202 g_assert_not_reached();
10203 }
10204
10205 write_fp_sreg(s, rd, tcg_rd);
10206 }
10207
10208 if (is_fcvt) {
10209 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
10210 }
10211 }
10212
10213 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10214 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10215 int immh, int immb, int opcode, int rn, int rd)
10216 {
10217 int size = 32 - clz32(immh) - 1;
10218 int immhb = immh << 3 | immb;
10219 int shift = 2 * (8 << size) - immhb;
10220 GVecGen2iFn *gvec_fn;
10221
10222 if (extract32(immh, 3, 1) && !is_q) {
10223 unallocated_encoding(s);
10224 return;
10225 }
10226 tcg_debug_assert(size <= 3);
10227
10228 if (!fp_access_check(s)) {
10229 return;
10230 }
10231
10232 switch (opcode) {
10233 case 0x02: /* SSRA / USRA (accumulate) */
10234 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10235 break;
10236
10237 case 0x08: /* SRI */
10238 gvec_fn = gen_gvec_sri;
10239 break;
10240
10241 case 0x00: /* SSHR / USHR */
10242 if (is_u) {
10243 if (shift == 8 << size) {
10244 /* Shift count the same size as element size produces zero. */
10245 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10246 is_q ? 16 : 8, vec_full_reg_size(s), 0);
10247 return;
10248 }
10249 gvec_fn = tcg_gen_gvec_shri;
10250 } else {
10251 /* Shift count the same size as element size produces all sign. */
10252 if (shift == 8 << size) {
10253 shift -= 1;
10254 }
10255 gvec_fn = tcg_gen_gvec_sari;
10256 }
10257 break;
10258
10259 case 0x04: /* SRSHR / URSHR (rounding) */
10260 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10261 break;
10262
10263 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10264 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10265 break;
10266
10267 default:
10268 g_assert_not_reached();
10269 }
10270
10271 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10272 }
10273
10274 /* SHL/SLI - Vector shift left */
10275 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10276 int immh, int immb, int opcode, int rn, int rd)
10277 {
10278 int size = 32 - clz32(immh) - 1;
10279 int immhb = immh << 3 | immb;
10280 int shift = immhb - (8 << size);
10281
10282 /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10283 assert(size >= 0 && size <= 3);
10284
10285 if (extract32(immh, 3, 1) && !is_q) {
10286 unallocated_encoding(s);
10287 return;
10288 }
10289
10290 if (!fp_access_check(s)) {
10291 return;
10292 }
10293
10294 if (insert) {
10295 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10296 } else {
10297 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10298 }
10299 }
10300
10301 /* USHLL/SHLL - Vector shift left with widening */
10302 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10303 int immh, int immb, int opcode, int rn, int rd)
10304 {
10305 int size = 32 - clz32(immh) - 1;
10306 int immhb = immh << 3 | immb;
10307 int shift = immhb - (8 << size);
10308 int dsize = 64;
10309 int esize = 8 << size;
10310 int elements = dsize/esize;
10311 TCGv_i64 tcg_rn = tcg_temp_new_i64();
10312 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10313 int i;
10314
10315 if (size >= 3) {
10316 unallocated_encoding(s);
10317 return;
10318 }
10319
10320 if (!fp_access_check(s)) {
10321 return;
10322 }
10323
10324 /* For the LL variants the store is larger than the load,
10325 * so if rd == rn we would overwrite parts of our input.
10326 * So load everything right now and use shifts in the main loop.
10327 */
10328 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10329
10330 for (i = 0; i < elements; i++) {
10331 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10332 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10333 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10334 write_vec_element(s, tcg_rd, rd, i, size + 1);
10335 }
10336 }
10337
10338 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10339 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10340 int immh, int immb, int opcode, int rn, int rd)
10341 {
10342 int immhb = immh << 3 | immb;
10343 int size = 32 - clz32(immh) - 1;
10344 int dsize = 64;
10345 int esize = 8 << size;
10346 int elements = dsize/esize;
10347 int shift = (2 * esize) - immhb;
10348 bool round = extract32(opcode, 0, 1);
10349 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10350 TCGv_i64 tcg_round;
10351 int i;
10352
10353 if (extract32(immh, 3, 1)) {
10354 unallocated_encoding(s);
10355 return;
10356 }
10357
10358 if (!fp_access_check(s)) {
10359 return;
10360 }
10361
10362 tcg_rn = tcg_temp_new_i64();
10363 tcg_rd = tcg_temp_new_i64();
10364 tcg_final = tcg_temp_new_i64();
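    /*
     * Preload the existing Rd value: the SHRN2/RSHRN2 forms deposit into
     * the upper half while leaving the lower half untouched.
     */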
10365 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
10366
10367 if (round) {
10368 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
10369 } else {
10370 tcg_round = NULL;
10371 }
10372
10373 for (i = 0; i < elements; i++) {
10374 read_vec_element(s, tcg_rn, rn, i, size+1);
10375 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10376 false, true, size+1, shift);
10377
10378 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10379 }
10380
10381 if (!is_q) {
10382 write_vec_element(s, tcg_final, rd, 0, MO_64);
10383 } else {
10384 write_vec_element(s, tcg_final, rd, 1, MO_64);
10385 }
10386
10387 clear_vec_high(s, is_q, rd);
10388 }
10389
10390
10391 /* AdvSIMD shift by immediate
10392 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
10393 * +---+---+---+-------------+------+------+--------+---+------+------+
10394 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
10395 * +---+---+---+-------------+------+------+--------+---+------+------+
10396 */
10397 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10398 {
10399 int rd = extract32(insn, 0, 5);
10400 int rn = extract32(insn, 5, 5);
10401 int opcode = extract32(insn, 11, 5);
10402 int immb = extract32(insn, 16, 3);
10403 int immh = extract32(insn, 19, 4);
10404 bool is_u = extract32(insn, 29, 1);
10405 bool is_q = extract32(insn, 30, 1);
10406
10407 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10408 assert(immh != 0);
10409
10410 switch (opcode) {
10411 case 0x08: /* SRI */
10412 if (!is_u) {
10413 unallocated_encoding(s);
10414 return;
10415 }
10416 /* fall through */
10417 case 0x00: /* SSHR / USHR */
10418 case 0x02: /* SSRA / USRA (accumulate) */
10419 case 0x04: /* SRSHR / URSHR (rounding) */
10420 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10421 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10422 break;
10423 case 0x0a: /* SHL / SLI */
10424 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10425 break;
10426 case 0x10: /* SHRN */
10427 case 0x11: /* RSHRN / SQRSHRUN */
10428 if (is_u) {
10429 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10430 opcode, rn, rd);
10431 } else {
10432 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10433 }
10434 break;
10435 case 0x12: /* SQSHRN / UQSHRN */
10436 case 0x13: /* SQRSHRN / UQRSHRN */
10437 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10438 opcode, rn, rd);
10439 break;
10440 case 0x14: /* SSHLL / USHLL */
10441 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10442 break;
10443 case 0x1c: /* SCVTF / UCVTF */
10444 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10445 opcode, rn, rd);
10446 break;
10447 case 0xc: /* SQSHLU */
10448 if (!is_u) {
10449 unallocated_encoding(s);
10450 return;
10451 }
10452 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10453 break;
10454 case 0xe: /* SQSHL, UQSHL */
10455 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10456 break;
10457 case 0x1f: /* FCVTZS/ FCVTZU */
10458 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10459 return;
10460 default:
10461 unallocated_encoding(s);
10462 return;
10463 }
10464 }
10465
10466 /* Generate code to do a "long" addition or subtraction, i.e. one done in
10467 * TCGv_i64 on vector lanes twice the width specified by size.
10468 */
10469 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10470 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10471 {
10472 static NeonGenTwo64OpFn * const fns[3][2] = {
10473 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10474 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10475 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10476 };
10477 NeonGenTwo64OpFn *genfn;
10478 assert(size < 3);
10479
10480 genfn = fns[size][is_sub];
10481 genfn(tcg_res, tcg_op1, tcg_op2);
10482 }
10483
10484 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10485 int opcode, int rd, int rn, int rm)
10486 {
10487 /* 3-reg-different widening insns: 64 x 64 -> 128 */
10488 TCGv_i64 tcg_res[2];
10489 int pass, accop;
10490
10491 tcg_res[0] = tcg_temp_new_i64();
10492 tcg_res[1] = tcg_temp_new_i64();
10493
10494 /* Does this op do an adding accumulate, a subtracting accumulate,
10495 * or no accumulate at all?
10496 */
10497 switch (opcode) {
10498 case 5:
10499 case 8:
10500 case 9:
10501 accop = 1;
10502 break;
10503 case 10:
10504 case 11:
10505 accop = -1;
10506 break;
10507 default:
10508 accop = 0;
10509 break;
10510 }
10511
10512 if (accop != 0) {
10513 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10514 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10515 }
10516
10517 /* size == 2 means two 32x32->64 operations; this is worth special
10518 * casing because we can generally handle it inline.
10519 */
10520 if (size == 2) {
10521 for (pass = 0; pass < 2; pass++) {
10522 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10523 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10524 TCGv_i64 tcg_passres;
10525 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10526
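            /* The second-half forms take their inputs from the upper halves. */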
10527 int elt = pass + is_q * 2;
10528
10529 read_vec_element(s, tcg_op1, rn, elt, memop);
10530 read_vec_element(s, tcg_op2, rm, elt, memop);
10531
10532 if (accop == 0) {
10533 tcg_passres = tcg_res[pass];
10534 } else {
10535 tcg_passres = tcg_temp_new_i64();
10536 }
10537
10538 switch (opcode) {
10539 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10540 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10541 break;
10542 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10543 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10544 break;
10545 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10546 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10547 {
10548 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10549 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10550
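                /*
                 * abs(op1 - op2): compute both differences and use a
                 * movcond to select the non-negative one.
                 */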
10551 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10552 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10553 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10554 tcg_passres,
10555 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10556 break;
10557 }
10558 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10559 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10560 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10561 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10562 break;
10563 case 9: /* SQDMLAL, SQDMLAL2 */
10564 case 11: /* SQDMLSL, SQDMLSL2 */
10565 case 13: /* SQDMULL, SQDMULL2 */
10566 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10567 gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
10568 tcg_passres, tcg_passres);
10569 break;
10570 default:
10571 g_assert_not_reached();
10572 }
10573
10574 if (opcode == 9 || opcode == 11) {
10575 /* saturating accumulate ops */
10576 if (accop < 0) {
10577 tcg_gen_neg_i64(tcg_passres, tcg_passres);
10578 }
10579 gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
10580 tcg_res[pass], tcg_passres);
10581 } else if (accop > 0) {
10582 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10583 } else if (accop < 0) {
10584 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10585 }
10586 }
10587 } else {
10588 /* size 0 or 1, generally helper functions */
10589 for (pass = 0; pass < 2; pass++) {
10590 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10591 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10592 TCGv_i64 tcg_passres;
10593 int elt = pass + is_q * 2;
10594
10595 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10596 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10597
10598 if (accop == 0) {
10599 tcg_passres = tcg_res[pass];
10600 } else {
10601 tcg_passres = tcg_temp_new_i64();
10602 }
10603
10604 switch (opcode) {
10605 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10606 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10607 {
10608 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10609 static NeonGenWidenFn * const widenfns[2][2] = {
10610 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10611 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10612 };
10613 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10614
10615 widenfn(tcg_op2_64, tcg_op2);
10616 widenfn(tcg_passres, tcg_op1);
10617 gen_neon_addl(size, (opcode == 2), tcg_passres,
10618 tcg_passres, tcg_op2_64);
10619 break;
10620 }
10621 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10622 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10623 if (size == 0) {
10624 if (is_u) {
10625 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10626 } else {
10627 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10628 }
10629 } else {
10630 if (is_u) {
10631 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10632 } else {
10633 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10634 }
10635 }
10636 break;
10637 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10638 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10639 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10640 if (size == 0) {
10641 if (is_u) {
10642 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10643 } else {
10644 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10645 }
10646 } else {
10647 if (is_u) {
10648 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10649 } else {
10650 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10651 }
10652 }
10653 break;
10654 case 9: /* SQDMLAL, SQDMLAL2 */
10655 case 11: /* SQDMLSL, SQDMLSL2 */
10656 case 13: /* SQDMULL, SQDMULL2 */
10657 assert(size == 1);
10658 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10659 gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
10660 tcg_passres, tcg_passres);
10661 break;
10662 default:
10663 g_assert_not_reached();
10664 }
10665
10666 if (accop != 0) {
10667 if (opcode == 9 || opcode == 11) {
10668 /* saturating accumulate ops */
10669 if (accop < 0) {
10670 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10671 }
10672 gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
10673 tcg_res[pass],
10674 tcg_passres);
10675 } else {
10676 gen_neon_addl(size, (accop < 0), tcg_res[pass],
10677 tcg_res[pass], tcg_passres);
10678 }
10679 }
10680 }
10681 }
10682
10683 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10684 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10685 }
10686
10687 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10688 int opcode, int rd, int rn, int rm)
10689 {
10690 TCGv_i64 tcg_res[2];
10691 int part = is_q ? 2 : 0;
10692 int pass;
10693
10694 for (pass = 0; pass < 2; pass++) {
10695 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10696 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10697 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10698 static NeonGenWidenFn * const widenfns[3][2] = {
10699 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10700 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10701 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10702 };
10703 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10704
10705 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10706 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10707 widenfn(tcg_op2_wide, tcg_op2);
10708 tcg_res[pass] = tcg_temp_new_i64();
10709 gen_neon_addl(size, (opcode == 3),
10710 tcg_res[pass], tcg_op1, tcg_op2_wide);
10711 }
10712
10713 for (pass = 0; pass < 2; pass++) {
10714 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10715 }
10716 }
10717
10718 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10719 {
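    /* Narrow to the high half with rounding: add 1 << 31, then take bits [63:32]. */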
10720 tcg_gen_addi_i64(in, in, 1U << 31);
10721 tcg_gen_extrh_i64_i32(res, in);
10722 }
10723
10724 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10725 int opcode, int rd, int rn, int rm)
10726 {
10727 TCGv_i32 tcg_res[2];
10728 int part = is_q ? 2 : 0;
10729 int pass;
10730
10731 for (pass = 0; pass < 2; pass++) {
10732 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10733 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10734 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10735 static NeonGenNarrowFn * const narrowfns[3][2] = {
10736 { gen_helper_neon_narrow_high_u8,
10737 gen_helper_neon_narrow_round_high_u8 },
10738 { gen_helper_neon_narrow_high_u16,
10739 gen_helper_neon_narrow_round_high_u16 },
10740 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10741 };
10742 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10743
10744 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10745 read_vec_element(s, tcg_op2, rm, pass, MO_64);
10746
10747 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10748
10749 tcg_res[pass] = tcg_temp_new_i32();
10750 gennarrow(tcg_res[pass], tcg_wideres);
10751 }
10752
10753 for (pass = 0; pass < 2; pass++) {
10754 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10755 }
10756 clear_vec_high(s, is_q, rd);
10757 }
10758
10759 /* AdvSIMD three different
10760 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
10761 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10762 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
10763 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10764 */
10765 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10766 {
10767 /* Instructions in this group fall into three basic classes
10768 * (in each case with the operation working on each element in
10769 * the input vectors):
10770 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10771 * 128 bit input)
10772 * (2) wide 64 x 128 -> 128
10773 * (3) narrowing 128 x 128 -> 64
10774 * Here we do initial decode, catch unallocated cases and
10775 * dispatch to separate functions for each class.
10776 */
10777 int is_q = extract32(insn, 30, 1);
10778 int is_u = extract32(insn, 29, 1);
10779 int size = extract32(insn, 22, 2);
10780 int opcode = extract32(insn, 12, 4);
10781 int rm = extract32(insn, 16, 5);
10782 int rn = extract32(insn, 5, 5);
10783 int rd = extract32(insn, 0, 5);
10784
10785 switch (opcode) {
10786 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10787 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10788 /* 64 x 128 -> 128 */
10789 if (size == 3) {
10790 unallocated_encoding(s);
10791 return;
10792 }
10793 if (!fp_access_check(s)) {
10794 return;
10795 }
10796 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10797 break;
10798 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10799 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10800 /* 128 x 128 -> 64 */
10801 if (size == 3) {
10802 unallocated_encoding(s);
10803 return;
10804 }
10805 if (!fp_access_check(s)) {
10806 return;
10807 }
10808 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10809 break;
10810 case 14: /* PMULL, PMULL2 */
10811 if (is_u) {
10812 unallocated_encoding(s);
10813 return;
10814 }
10815 switch (size) {
10816 case 0: /* PMULL.P8 */
10817 if (!fp_access_check(s)) {
10818 return;
10819 }
10820 /* The Q field specifies lo/hi half input for this insn. */
10821 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10822 gen_helper_neon_pmull_h);
10823 break;
10824
10825 case 3: /* PMULL.P64 */
10826 if (!dc_isar_feature(aa64_pmull, s)) {
10827 unallocated_encoding(s);
10828 return;
10829 }
10830 if (!fp_access_check(s)) {
10831 return;
10832 }
10833 /* The Q field specifies lo/hi half input for this insn. */
10834 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10835 gen_helper_gvec_pmull_q);
10836 break;
10837
10838 default:
10839 unallocated_encoding(s);
10840 break;
10841 }
10842 return;
10843 case 9: /* SQDMLAL, SQDMLAL2 */
10844 case 11: /* SQDMLSL, SQDMLSL2 */
10845 case 13: /* SQDMULL, SQDMULL2 */
10846 if (is_u || size == 0) {
10847 unallocated_encoding(s);
10848 return;
10849 }
10850 /* fall through */
10851 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10852 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10853 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10854 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10855 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10856 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10857 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10858 /* 64 x 64 -> 128 */
10859 if (size == 3) {
10860 unallocated_encoding(s);
10861 return;
10862 }
10863 if (!fp_access_check(s)) {
10864 return;
10865 }
10866
10867 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10868 break;
10869 default:
10870 /* opcode 15 not allocated */
10871 unallocated_encoding(s);
10872 break;
10873 }
10874 }
10875
10876 /* AdvSIMD three same extra
10877 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
10878 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
10879 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
10880 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
10881 */
10882 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
10883 {
10884 int rd = extract32(insn, 0, 5);
10885 int rn = extract32(insn, 5, 5);
10886 int opcode = extract32(insn, 11, 4);
10887 int rm = extract32(insn, 16, 5);
10888 int size = extract32(insn, 22, 2);
10889 bool u = extract32(insn, 29, 1);
10890 bool is_q = extract32(insn, 30, 1);
10891 bool feature;
10892 int rot;
10893
10894 switch (u * 16 + opcode) {
10895 case 0x10: /* SQRDMLAH (vector) */
10896 case 0x11: /* SQRDMLSH (vector) */
10897 if (size != 1 && size != 2) {
10898 unallocated_encoding(s);
10899 return;
10900 }
10901 feature = dc_isar_feature(aa64_rdm, s);
10902 break;
10903 case 0x02: /* SDOT (vector) */
10904 case 0x12: /* UDOT (vector) */
10905 if (size != MO_32) {
10906 unallocated_encoding(s);
10907 return;
10908 }
10909 feature = dc_isar_feature(aa64_dp, s);
10910 break;
10911 case 0x03: /* USDOT */
10912 if (size != MO_32) {
10913 unallocated_encoding(s);
10914 return;
10915 }
10916 feature = dc_isar_feature(aa64_i8mm, s);
10917 break;
10918 case 0x04: /* SMMLA */
10919 case 0x14: /* UMMLA */
10920 case 0x05: /* USMMLA */
10921 if (!is_q || size != MO_32) {
10922 unallocated_encoding(s);
10923 return;
10924 }
10925 feature = dc_isar_feature(aa64_i8mm, s);
10926 break;
10927 case 0x18: /* FCMLA, #0 */
10928 case 0x19: /* FCMLA, #90 */
10929 case 0x1a: /* FCMLA, #180 */
10930 case 0x1b: /* FCMLA, #270 */
10931 case 0x1c: /* FCADD, #90 */
10932 case 0x1e: /* FCADD, #270 */
10933 if (size == 0
10934 || (size == 1 && !dc_isar_feature(aa64_fp16, s))
10935 || (size == 3 && !is_q)) {
10936 unallocated_encoding(s);
10937 return;
10938 }
10939 feature = dc_isar_feature(aa64_fcma, s);
10940 break;
10941 case 0x1d: /* BFMMLA */
10942 if (size != MO_16 || !is_q) {
10943 unallocated_encoding(s);
10944 return;
10945 }
10946 feature = dc_isar_feature(aa64_bf16, s);
10947 break;
10948 case 0x1f:
10949 switch (size) {
10950 case 1: /* BFDOT */
10951 case 3: /* BFMLAL{B,T} */
10952 feature = dc_isar_feature(aa64_bf16, s);
10953 break;
10954 default:
10955 unallocated_encoding(s);
10956 return;
10957 }
10958 break;
10959 default:
10960 unallocated_encoding(s);
10961 return;
10962 }
10963 if (!feature) {
10964 unallocated_encoding(s);
10965 return;
10966 }
10967 if (!fp_access_check(s)) {
10968 return;
10969 }
10970
10971 switch (opcode) {
10972 case 0x0: /* SQRDMLAH (vector) */
10973 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
10974 return;
10975
10976 case 0x1: /* SQRDMLSH (vector) */
10977 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
10978 return;
10979
10980 case 0x2: /* SDOT / UDOT */
10981 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
10982 u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
10983 return;
10984
10985 case 0x3: /* USDOT */
10986 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
10987 return;
10988
10989 case 0x04: /* SMMLA, UMMLA */
10990 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
10991 u ? gen_helper_gvec_ummla_b
10992 : gen_helper_gvec_smmla_b);
10993 return;
10994 case 0x05: /* USMMLA */
10995 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
10996 return;
10997
10998 case 0x8: /* FCMLA, #0 */
10999 case 0x9: /* FCMLA, #90 */
11000 case 0xa: /* FCMLA, #180 */
11001 case 0xb: /* FCMLA, #270 */
11002 rot = extract32(opcode, 0, 2);
11003 switch (size) {
11004 case 1:
11005 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
11006 gen_helper_gvec_fcmlah);
11007 break;
11008 case 2:
11009 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11010 gen_helper_gvec_fcmlas);
11011 break;
11012 case 3:
11013 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11014 gen_helper_gvec_fcmlad);
11015 break;
11016 default:
11017 g_assert_not_reached();
11018 }
11019 return;
11020
11021 case 0xc: /* FCADD, #90 */
11022 case 0xe: /* FCADD, #270 */
11023 rot = extract32(opcode, 1, 1);
11024 switch (size) {
11025 case 1:
11026 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11027 gen_helper_gvec_fcaddh);
11028 break;
11029 case 2:
11030 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11031 gen_helper_gvec_fcadds);
11032 break;
11033 case 3:
11034 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11035 gen_helper_gvec_fcaddd);
11036 break;
11037 default:
11038 g_assert_not_reached();
11039 }
11040 return;
11041
11042 case 0xd: /* BFMMLA */
11043 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
11044 return;
11045 case 0xf:
11046 switch (size) {
11047 case 1: /* BFDOT */
11048 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
11049 break;
11050 case 3: /* BFMLAL{B,T} */
11051 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
11052 gen_helper_gvec_bfmlal);
11053 break;
11054 default:
11055 g_assert_not_reached();
11056 }
11057 return;
11058
11059 default:
11060 g_assert_not_reached();
11061 }
11062 }
11063
11064 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11065 int size, int rn, int rd)
11066 {
11067 /* Handle 2-reg-misc ops which are widening (so each size element
11068 * in the source becomes a 2*size element in the destination).
11069 * The only instruction like this is FCVTL.
11070 */
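/*
 * For example (standard A64 syntax assumed): FCVTL v0.2d, v1.2s takes
 * the size == 3 path below and converts the low two single-precision
 * elements, while FCVTL2 (is_q set) starts from srcelt == 2, i.e. the
 * high half of the source.
 */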
11071 int pass;
11072
11073 if (size == 3) {
11074 /* 32 -> 64 bit fp conversion */
11075 TCGv_i64 tcg_res[2];
11076 int srcelt = is_q ? 2 : 0;
11077
11078 for (pass = 0; pass < 2; pass++) {
11079 TCGv_i32 tcg_op = tcg_temp_new_i32();
11080 tcg_res[pass] = tcg_temp_new_i64();
11081
11082 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11083 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
11084 }
11085 for (pass = 0; pass < 2; pass++) {
11086 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11087 }
11088 } else {
11089 /* 16 -> 32 bit fp conversion */
11090 int srcelt = is_q ? 4 : 0;
11091 TCGv_i32 tcg_res[4];
11092 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
11093 TCGv_i32 ahp = get_ahp_flag();
11094
11095 for (pass = 0; pass < 4; pass++) {
11096 tcg_res[pass] = tcg_temp_new_i32();
11097
11098 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11099 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11100 fpst, ahp);
11101 }
11102 for (pass = 0; pass < 4; pass++) {
11103 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11104 }
11105 }
11106 }
11107
11108 static void handle_rev(DisasContext *s, int opcode, bool u,
11109 bool is_q, int size, int rn, int rd)
11110 {
11111 int op = (opcode << 1) | u;
11112 int opsz = op + size;
11113 int grp_size = 3 - opsz;
11114 int dsize = is_q ? 128 : 64;
11115 int i;
11116
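/*
 * The op:size combination collapses to a reversal group size:
 * REV64 is op 0, REV32 op 1 and REV16 op 2, and grp_size = 3 - opsz
 * is the log2 of the group width in bytes. E.g. REV16 on bytes has
 * op == 2 and size == 0, so grp_size == 1 (16-bit groups).
 * Combinations where the group would be no wider than the element
 * (opsz >= 3) are unallocated.
 */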
11117 if (opsz >= 3) {
11118 unallocated_encoding(s);
11119 return;
11120 }
11121
11122 if (!fp_access_check(s)) {
11123 return;
11124 }
11125
11126 if (size == 0) {
11127 /* Special case bytes, use bswap op on each group of elements */
11128 int groups = dsize / (8 << grp_size);
11129
11130 for (i = 0; i < groups; i++) {
11131 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11132
11133 read_vec_element(s, tcg_tmp, rn, i, grp_size);
11134 switch (grp_size) {
11135 case MO_16:
11136 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11137 break;
11138 case MO_32:
11139 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11140 break;
11141 case MO_64:
11142 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11143 break;
11144 default:
11145 g_assert_not_reached();
11146 }
11147 write_vec_element(s, tcg_tmp, rd, i, grp_size);
11148 }
11149 clear_vec_high(s, is_q, rd);
11150 } else {
11151 int revmask = (1 << grp_size) - 1;
11152 int esize = 8 << size;
11153 int elements = dsize / esize;
11154 TCGv_i64 tcg_rn = tcg_temp_new_i64();
11155 TCGv_i64 tcg_rd[2];
11156
11157 for (i = 0; i < 2; i++) {
11158 tcg_rd[i] = tcg_temp_new_i64();
11159 tcg_gen_movi_i64(tcg_rd[i], 0);
11160 }
11161
11162 for (i = 0; i < elements; i++) {
11163 int e_rev = (i & 0xf) ^ revmask;
11164 int w = (e_rev * esize) / 64;
11165 int o = (e_rev * esize) % 64;
11166
11167 read_vec_element(s, tcg_rn, rn, i, size);
11168 tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
11169 }
11170
11171 for (i = 0; i < 2; i++) {
11172 write_vec_element(s, tcg_rd[i], rd, i, MO_64);
11173 }
11174 clear_vec_high(s, true, rd);
11175 }
11176 }
11177
11178 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11179 bool is_q, int size, int rn, int rd)
11180 {
11181 /* Implement the pairwise operations from 2-misc:
11182 * SADDLP, UADDLP, SADALP, UADALP.
11183 * These all add pairs of elements in the input to produce a
11184 * double-width result element in the output (possibly accumulating).
11185 */
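/*
 * E.g. SADDLP v0.4s, v1.8h adds adjacent pairs of 16-bit elements into
 * 32-bit results; SADALP is the accum case below, which also adds in
 * the old contents of the destination.
 */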
11186 bool accum = (opcode == 0x6);
11187 int maxpass = is_q ? 2 : 1;
11188 int pass;
11189 TCGv_i64 tcg_res[2];
11190
11191 if (size == 2) {
11192 /* 32 + 32 -> 64 op */
11193 MemOp memop = size + (u ? 0 : MO_SIGN);
11194
11195 for (pass = 0; pass < maxpass; pass++) {
11196 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11197 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11198
11199 tcg_res[pass] = tcg_temp_new_i64();
11200
11201 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11202 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11203 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11204 if (accum) {
11205 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11206 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11207 }
11208 }
11209 } else {
11210 for (pass = 0; pass < maxpass; pass++) {
11211 TCGv_i64 tcg_op = tcg_temp_new_i64();
11212 NeonGenOne64OpFn *genfn;
11213 static NeonGenOne64OpFn * const fns[2][2] = {
11214 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
11215 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
11216 };
11217
11218 genfn = fns[size][u];
11219
11220 tcg_res[pass] = tcg_temp_new_i64();
11221
11222 read_vec_element(s, tcg_op, rn, pass, MO_64);
11223 genfn(tcg_res[pass], tcg_op);
11224
11225 if (accum) {
11226 read_vec_element(s, tcg_op, rd, pass, MO_64);
11227 if (size == 0) {
11228 gen_helper_neon_addl_u16(tcg_res[pass],
11229 tcg_res[pass], tcg_op);
11230 } else {
11231 gen_helper_neon_addl_u32(tcg_res[pass],
11232 tcg_res[pass], tcg_op);
11233 }
11234 }
11235 }
11236 }
11237 if (!is_q) {
11238 tcg_res[1] = tcg_constant_i64(0);
11239 }
11240 for (pass = 0; pass < 2; pass++) {
11241 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11242 }
11243 }
11244
11245 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11246 {
11247 /* Implement SHLL and SHLL2 */
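/*
 * Each source element is widened and then shifted left by its original
 * width (the 8 << size below); e.g. SHLL v0.8h, v1.8b, #8 widens each
 * byte and shifts it left by 8. SHLL2 (is_q) takes the high half of
 * the source instead.
 */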
11248 int pass;
11249 int part = is_q ? 2 : 0;
11250 TCGv_i64 tcg_res[2];
11251
11252 for (pass = 0; pass < 2; pass++) {
11253 static NeonGenWidenFn * const widenfns[3] = {
11254 gen_helper_neon_widen_u8,
11255 gen_helper_neon_widen_u16,
11256 tcg_gen_extu_i32_i64,
11257 };
11258 NeonGenWidenFn *widenfn = widenfns[size];
11259 TCGv_i32 tcg_op = tcg_temp_new_i32();
11260
11261 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11262 tcg_res[pass] = tcg_temp_new_i64();
11263 widenfn(tcg_res[pass], tcg_op);
11264 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11265 }
11266
11267 for (pass = 0; pass < 2; pass++) {
11268 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11269 }
11270 }
11271
11272 /* AdvSIMD two reg misc
11273 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
11274 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11275 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
11276 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11277 */
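/*
 * Worked field example: NOT is encoded here with U == 1, size == 0 and
 * opcode == 0x5; RBIT shares the opcode with U == 1, size == 1, and
 * CNT uses it with U == 0, size == 0 (see the case 0x5 checks below).
 */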
11278 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11279 {
11280 int size = extract32(insn, 22, 2);
11281 int opcode = extract32(insn, 12, 5);
11282 bool u = extract32(insn, 29, 1);
11283 bool is_q = extract32(insn, 30, 1);
11284 int rn = extract32(insn, 5, 5);
11285 int rd = extract32(insn, 0, 5);
11286 bool need_fpstatus = false;
11287 int rmode = -1;
11288 TCGv_i32 tcg_rmode;
11289 TCGv_ptr tcg_fpstatus;
11290
11291 switch (opcode) {
11292 case 0x0: /* REV64, REV32 */
11293 case 0x1: /* REV16 */
11294 handle_rev(s, opcode, u, is_q, size, rn, rd);
11295 return;
11296 case 0x5: /* CNT, NOT, RBIT */
11297 if (u && size == 0) {
11298 /* NOT */
11299 break;
11300 } else if (u && size == 1) {
11301 /* RBIT */
11302 break;
11303 } else if (!u && size == 0) {
11304 /* CNT */
11305 break;
11306 }
11307 unallocated_encoding(s);
11308 return;
11309 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11310 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11311 if (size == 3) {
11312 unallocated_encoding(s);
11313 return;
11314 }
11315 if (!fp_access_check(s)) {
11316 return;
11317 }
11318
11319 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11320 return;
11321 case 0x4: /* CLS, CLZ */
11322 if (size == 3) {
11323 unallocated_encoding(s);
11324 return;
11325 }
11326 break;
11327 case 0x2: /* SADDLP, UADDLP */
11328 case 0x6: /* SADALP, UADALP */
11329 if (size == 3) {
11330 unallocated_encoding(s);
11331 return;
11332 }
11333 if (!fp_access_check(s)) {
11334 return;
11335 }
11336 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
11337 return;
11338 case 0x13: /* SHLL, SHLL2 */
11339 if (u == 0 || size == 3) {
11340 unallocated_encoding(s);
11341 return;
11342 }
11343 if (!fp_access_check(s)) {
11344 return;
11345 }
11346 handle_shll(s, is_q, size, rn, rd);
11347 return;
11348 case 0xa: /* CMLT */
11349 if (u == 1) {
11350 unallocated_encoding(s);
11351 return;
11352 }
11353 /* fall through */
11354 case 0x8: /* CMGT, CMGE */
11355 case 0x9: /* CMEQ, CMLE */
11356 case 0xb: /* ABS, NEG */
11357 if (size == 3 && !is_q) {
11358 unallocated_encoding(s);
11359 return;
11360 }
11361 break;
11362 case 0x7: /* SQABS, SQNEG */
11363 if (size == 3 && !is_q) {
11364 unallocated_encoding(s);
11365 return;
11366 }
11367 break;
11368 case 0xc ... 0xf:
11369 case 0x16 ... 0x1f:
11370 {
11371 /* Floating point: U, size[1] and opcode indicate operation;
11372 * size[0] indicates single or double precision.
11373 */
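/*
 * E.g. FNEG arrives with U == 1, size[1] == 1 and opcode == 0xf, which
 * recombines to 0x6f below; FABS differs only in U == 0, giving 0x2f.
 * size[0] then selects single (0) or double (1) precision.
 */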
11374 int is_double = extract32(size, 0, 1);
11375 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
11376 size = is_double ? 3 : 2;
11377 switch (opcode) {
11378 case 0x2f: /* FABS */
11379 case 0x6f: /* FNEG */
11380 if (size == 3 && !is_q) {
11381 unallocated_encoding(s);
11382 return;
11383 }
11384 break;
11385 case 0x1d: /* SCVTF */
11386 case 0x5d: /* UCVTF */
11387 {
11388 bool is_signed = (opcode == 0x1d);
11389 int elements = is_double ? 2 : is_q ? 4 : 2;
11390 if (is_double && !is_q) {
11391 unallocated_encoding(s);
11392 return;
11393 }
11394 if (!fp_access_check(s)) {
11395 return;
11396 }
11397 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
11398 return;
11399 }
11400 case 0x2c: /* FCMGT (zero) */
11401 case 0x2d: /* FCMEQ (zero) */
11402 case 0x2e: /* FCMLT (zero) */
11403 case 0x6c: /* FCMGE (zero) */
11404 case 0x6d: /* FCMLE (zero) */
11405 if (size == 3 && !is_q) {
11406 unallocated_encoding(s);
11407 return;
11408 }
11409 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
11410 return;
11411 case 0x7f: /* FSQRT */
11412 if (size == 3 && !is_q) {
11413 unallocated_encoding(s);
11414 return;
11415 }
11416 break;
11417 case 0x1a: /* FCVTNS */
11418 case 0x1b: /* FCVTMS */
11419 case 0x3a: /* FCVTPS */
11420 case 0x3b: /* FCVTZS */
11421 case 0x5a: /* FCVTNU */
11422 case 0x5b: /* FCVTMU */
11423 case 0x7a: /* FCVTPU */
11424 case 0x7b: /* FCVTZU */
11425 need_fpstatus = true;
11426 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
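/*
 * This maps the opcode bits onto the rounding mode: FCVTNS (0x1a) ->
 * tie-even, FCVTPS (0x3a) -> +inf, FCVTMS (0x1b) -> -inf and FCVTZS
 * (0x3b) -> zero, matching the explicit FPROUNDING_* cases in the
 * FP16 decoder below.
 */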
11427 if (size == 3 && !is_q) {
11428 unallocated_encoding(s);
11429 return;
11430 }
11431 break;
11432 case 0x5c: /* FCVTAU */
11433 case 0x1c: /* FCVTAS */
11434 need_fpstatus = true;
11435 rmode = FPROUNDING_TIEAWAY;
11436 if (size == 3 && !is_q) {
11437 unallocated_encoding(s);
11438 return;
11439 }
11440 break;
11441 case 0x3c: /* URECPE */
11442 if (size == 3) {
11443 unallocated_encoding(s);
11444 return;
11445 }
11446 /* fall through */
11447 case 0x3d: /* FRECPE */
11448 case 0x7d: /* FRSQRTE */
11449 if (size == 3 && !is_q) {
11450 unallocated_encoding(s);
11451 return;
11452 }
11453 if (!fp_access_check(s)) {
11454 return;
11455 }
11456 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
11457 return;
11458 case 0x56: /* FCVTXN, FCVTXN2 */
11459 if (size == 2) {
11460 unallocated_encoding(s);
11461 return;
11462 }
11463 /* fall through */
11464 case 0x16: /* FCVTN, FCVTN2 */
11465 /* handle_2misc_narrow does a 2*size -> size operation, but these
11466 * instructions encode the source size rather than dest size.
11467 */
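/*
 * Hence the size - 1 below: e.g. FCVTN with size == 3 (double
 * precision source) narrows 64-bit elements to 32 bits, which is a
 * destination size of 2 as far as handle_2misc_narrow is concerned.
 */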
11468 if (!fp_access_check(s)) {
11469 return;
11470 }
11471 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11472 return;
11473 case 0x36: /* BFCVTN, BFCVTN2 */
11474 if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
11475 unallocated_encoding(s);
11476 return;
11477 }
11478 if (!fp_access_check(s)) {
11479 return;
11480 }
11481 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11482 return;
11483 case 0x17: /* FCVTL, FCVTL2 */
11484 if (!fp_access_check(s)) {
11485 return;
11486 }
11487 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
11488 return;
11489 case 0x18: /* FRINTN */
11490 case 0x19: /* FRINTM */
11491 case 0x38: /* FRINTP */
11492 case 0x39: /* FRINTZ */
11493 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11494 /* fall through */
11495 case 0x59: /* FRINTX */
11496 case 0x79: /* FRINTI */
11497 need_fpstatus = true;
11498 if (size == 3 && !is_q) {
11499 unallocated_encoding(s);
11500 return;
11501 }
11502 break;
11503 case 0x58: /* FRINTA */
11504 rmode = FPROUNDING_TIEAWAY;
11505 need_fpstatus = true;
11506 if (size == 3 && !is_q) {
11507 unallocated_encoding(s);
11508 return;
11509 }
11510 break;
11511 case 0x7c: /* URSQRTE */
11512 if (size == 3) {
11513 unallocated_encoding(s);
11514 return;
11515 }
11516 break;
11517 case 0x1e: /* FRINT32Z */
11518 case 0x1f: /* FRINT64Z */
11519 rmode = FPROUNDING_ZERO;
11520 /* fall through */
11521 case 0x5e: /* FRINT32X */
11522 case 0x5f: /* FRINT64X */
11523 need_fpstatus = true;
11524 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
11525 unallocated_encoding(s);
11526 return;
11527 }
11528 break;
11529 default:
11530 unallocated_encoding(s);
11531 return;
11532 }
11533 break;
11534 }
11535 default:
11536 case 0x3: /* SUQADD, USQADD */
11537 unallocated_encoding(s);
11538 return;
11539 }
11540
11541 if (!fp_access_check(s)) {
11542 return;
11543 }
11544
11545 if (need_fpstatus || rmode >= 0) {
11546 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
11547 } else {
11548 tcg_fpstatus = NULL;
11549 }
11550 if (rmode >= 0) {
11551 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
11552 } else {
11553 tcg_rmode = NULL;
11554 }
11555
11556 switch (opcode) {
11557 case 0x5:
11558 if (u && size == 0) { /* NOT */
11559 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
11560 return;
11561 }
11562 break;
11563 case 0x8: /* CMGT, CMGE */
11564 if (u) {
11565 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
11566 } else {
11567 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
11568 }
11569 return;
11570 case 0x9: /* CMEQ, CMLE */
11571 if (u) {
11572 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
11573 } else {
11574 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
11575 }
11576 return;
11577 case 0xa: /* CMLT */
11578 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
11579 return;
11580 case 0xb:
11581 if (u) { /* ABS, NEG */
11582 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
11583 } else {
11584 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
11585 }
11586 return;
11587 }
11588
11589 if (size == 3) {
11590 /* All 64-bit element operations can be shared with scalar 2misc */
11591 int pass;
11592
11593 /* Coverity claims (size == 3 && !is_q) has been eliminated
11594 * from all paths leading to here.
11595 */
11596 tcg_debug_assert(is_q);
11597 for (pass = 0; pass < 2; pass++) {
11598 TCGv_i64 tcg_op = tcg_temp_new_i64();
11599 TCGv_i64 tcg_res = tcg_temp_new_i64();
11600
11601 read_vec_element(s, tcg_op, rn, pass, MO_64);
11602
11603 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
11604 tcg_rmode, tcg_fpstatus);
11605
11606 write_vec_element(s, tcg_res, rd, pass, MO_64);
11607 }
11608 } else {
11609 int pass;
11610
11611 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11612 TCGv_i32 tcg_op = tcg_temp_new_i32();
11613 TCGv_i32 tcg_res = tcg_temp_new_i32();
11614
11615 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
11616
11617 if (size == 2) {
11618 /* Special cases for 32 bit elements */
11619 switch (opcode) {
11620 case 0x4: /* CLS */
11621 if (u) {
11622 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
11623 } else {
11624 tcg_gen_clrsb_i32(tcg_res, tcg_op);
11625 }
11626 break;
11627 case 0x7: /* SQABS, SQNEG */
11628 if (u) {
11629 gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
11630 } else {
11631 gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
11632 }
11633 break;
11634 case 0x2f: /* FABS */
11635 gen_vfp_abss(tcg_res, tcg_op);
11636 break;
11637 case 0x6f: /* FNEG */
11638 gen_vfp_negs(tcg_res, tcg_op);
11639 break;
11640 case 0x7f: /* FSQRT */
11641 gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
11642 break;
11643 case 0x1a: /* FCVTNS */
11644 case 0x1b: /* FCVTMS */
11645 case 0x1c: /* FCVTAS */
11646 case 0x3a: /* FCVTPS */
11647 case 0x3b: /* FCVTZS */
11648 gen_helper_vfp_tosls(tcg_res, tcg_op,
11649 tcg_constant_i32(0), tcg_fpstatus);
11650 break;
11651 case 0x5a: /* FCVTNU */
11652 case 0x5b: /* FCVTMU */
11653 case 0x5c: /* FCVTAU */
11654 case 0x7a: /* FCVTPU */
11655 case 0x7b: /* FCVTZU */
11656 gen_helper_vfp_touls(tcg_res, tcg_op,
11657 tcg_constant_i32(0), tcg_fpstatus);
11658 break;
11659 case 0x18: /* FRINTN */
11660 case 0x19: /* FRINTM */
11661 case 0x38: /* FRINTP */
11662 case 0x39: /* FRINTZ */
11663 case 0x58: /* FRINTA */
11664 case 0x79: /* FRINTI */
11665 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
11666 break;
11667 case 0x59: /* FRINTX */
11668 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
11669 break;
11670 case 0x7c: /* URSQRTE */
11671 gen_helper_rsqrte_u32(tcg_res, tcg_op);
11672 break;
11673 case 0x1e: /* FRINT32Z */
11674 case 0x5e: /* FRINT32X */
11675 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
11676 break;
11677 case 0x1f: /* FRINT64Z */
11678 case 0x5f: /* FRINT64X */
11679 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
11680 break;
11681 default:
11682 g_assert_not_reached();
11683 }
11684 } else {
11685 /* Use helpers for 8 and 16 bit elements */
11686 switch (opcode) {
11687 case 0x5: /* CNT, RBIT */
11688 /* For these two insns size is part of the opcode specifier
11689 * (handled earlier); they always operate on byte elements.
11690 */
11691 if (u) {
11692 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
11693 } else {
11694 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
11695 }
11696 break;
11697 case 0x7: /* SQABS, SQNEG */
11698 {
11699 NeonGenOneOpEnvFn *genfn;
11700 static NeonGenOneOpEnvFn * const fns[2][2] = {
11701 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
11702 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
11703 };
11704 genfn = fns[size][u];
11705 genfn(tcg_res, tcg_env, tcg_op);
11706 break;
11707 }
11708 case 0x4: /* CLS, CLZ */
11709 if (u) {
11710 if (size == 0) {
11711 gen_helper_neon_clz_u8(tcg_res, tcg_op);
11712 } else {
11713 gen_helper_neon_clz_u16(tcg_res, tcg_op);
11714 }
11715 } else {
11716 if (size == 0) {
11717 gen_helper_neon_cls_s8(tcg_res, tcg_op);
11718 } else {
11719 gen_helper_neon_cls_s16(tcg_res, tcg_op);
11720 }
11721 }
11722 break;
11723 default:
11724 g_assert_not_reached();
11725 }
11726 }
11727
11728 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11729 }
11730 }
11731 clear_vec_high(s, is_q, rd);
11732
11733 if (tcg_rmode) {
11734 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
11735 }
11736 }
11737
11738 /* AdvSIMD [scalar] two register miscellaneous (FP16)
11739 *
11740 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
11741 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
11742 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
11743 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
11744 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
11745 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
11746 *
11747 * This actually covers two groups where scalar access is governed by
11748 * bit 28. A bunch of the instructions (float to integral) only exist
11749 * in the vector form and are unallocated for the scalar decode. Also
11750 * in the scalar decode Q is always 1.
11751 */
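/*
 * Worked example of the fpop key built below (a deposited at bit 5,
 * U at bit 6, over the 5-bit opcode): FCVTZS has opcode == 0x1b with
 * a == 1 and U == 0, giving fpop 0x3b; FCVTZU additionally sets U and
 * lands on 0x7b.
 */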
11752 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
11753 {
11754 int fpop, opcode, a, u;
11755 int rn, rd;
11756 bool is_q;
11757 bool is_scalar;
11758 bool only_in_vector = false;
11759
11760 int pass;
11761 TCGv_i32 tcg_rmode = NULL;
11762 TCGv_ptr tcg_fpstatus = NULL;
11763 bool need_fpst = true;
11764 int rmode = -1;
11765
11766 if (!dc_isar_feature(aa64_fp16, s)) {
11767 unallocated_encoding(s);
11768 return;
11769 }
11770
11771 rd = extract32(insn, 0, 5);
11772 rn = extract32(insn, 5, 5);
11773
11774 a = extract32(insn, 23, 1);
11775 u = extract32(insn, 29, 1);
11776 is_scalar = extract32(insn, 28, 1);
11777 is_q = extract32(insn, 30, 1);
11778
11779 opcode = extract32(insn, 12, 5);
11780 fpop = deposit32(opcode, 5, 1, a);
11781 fpop = deposit32(fpop, 6, 1, u);
11782
11783 switch (fpop) {
11784 case 0x1d: /* SCVTF */
11785 case 0x5d: /* UCVTF */
11786 {
11787 int elements;
11788
11789 if (is_scalar) {
11790 elements = 1;
11791 } else {
11792 elements = (is_q ? 8 : 4);
11793 }
11794
11795 if (!fp_access_check(s)) {
11796 return;
11797 }
11798 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
11799 return;
11800 }
11801 break;
11802 case 0x2c: /* FCMGT (zero) */
11803 case 0x2d: /* FCMEQ (zero) */
11804 case 0x2e: /* FCMLT (zero) */
11805 case 0x6c: /* FCMGE (zero) */
11806 case 0x6d: /* FCMLE (zero) */
11807 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
11808 return;
11809 case 0x3d: /* FRECPE */
11810 case 0x3f: /* FRECPX */
11811 break;
11812 case 0x18: /* FRINTN */
11813 only_in_vector = true;
11814 rmode = FPROUNDING_TIEEVEN;
11815 break;
11816 case 0x19: /* FRINTM */
11817 only_in_vector = true;
11818 rmode = FPROUNDING_NEGINF;
11819 break;
11820 case 0x38: /* FRINTP */
11821 only_in_vector = true;
11822 rmode = FPROUNDING_POSINF;
11823 break;
11824 case 0x39: /* FRINTZ */
11825 only_in_vector = true;
11826 rmode = FPROUNDING_ZERO;
11827 break;
11828 case 0x58: /* FRINTA */
11829 only_in_vector = true;
11830 rmode = FPROUNDING_TIEAWAY;
11831 break;
11832 case 0x59: /* FRINTX */
11833 case 0x79: /* FRINTI */
11834 only_in_vector = true;
11835 /* current rounding mode */
11836 break;
11837 case 0x1a: /* FCVTNS */
11838 rmode = FPROUNDING_TIEEVEN;
11839 break;
11840 case 0x1b: /* FCVTMS */
11841 rmode = FPROUNDING_NEGINF;
11842 break;
11843 case 0x1c: /* FCVTAS */
11844 rmode = FPROUNDING_TIEAWAY;
11845 break;
11846 case 0x3a: /* FCVTPS */
11847 rmode = FPROUNDING_POSINF;
11848 break;
11849 case 0x3b: /* FCVTZS */
11850 rmode = FPROUNDING_ZERO;
11851 break;
11852 case 0x5a: /* FCVTNU */
11853 rmode = FPROUNDING_TIEEVEN;
11854 break;
11855 case 0x5b: /* FCVTMU */
11856 rmode = FPROUNDING_NEGINF;
11857 break;
11858 case 0x5c: /* FCVTAU */
11859 rmode = FPROUNDING_TIEAWAY;
11860 break;
11861 case 0x7a: /* FCVTPU */
11862 rmode = FPROUNDING_POSINF;
11863 break;
11864 case 0x7b: /* FCVTZU */
11865 rmode = FPROUNDING_ZERO;
11866 break;
11867 case 0x2f: /* FABS */
11868 case 0x6f: /* FNEG */
11869 need_fpst = false;
11870 break;
11871 case 0x7d: /* FRSQRTE */
11872 case 0x7f: /* FSQRT (vector) */
11873 break;
11874 default:
11875 unallocated_encoding(s);
11876 return;
11877 }
11878
11879
11880 /* Check additional constraints for the scalar encoding */
11881 if (is_scalar) {
11882 if (!is_q) {
11883 unallocated_encoding(s);
11884 return;
11885 }
11886 /* FRINTxx is only in the vector form */
11887 if (only_in_vector) {
11888 unallocated_encoding(s);
11889 return;
11890 }
11891 }
11892
11893 if (!fp_access_check(s)) {
11894 return;
11895 }
11896
11897 if (rmode >= 0 || need_fpst) {
11898 tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
11899 }
11900
11901 if (rmode >= 0) {
11902 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
11903 }
11904
11905 if (is_scalar) {
11906 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
11907 TCGv_i32 tcg_res = tcg_temp_new_i32();
11908
11909 switch (fpop) {
11910 case 0x1a: /* FCVTNS */
11911 case 0x1b: /* FCVTMS */
11912 case 0x1c: /* FCVTAS */
11913 case 0x3a: /* FCVTPS */
11914 case 0x3b: /* FCVTZS */
11915 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
11916 break;
11917 case 0x3d: /* FRECPE */
11918 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
11919 break;
11920 case 0x3f: /* FRECPX */
11921 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
11922 break;
11923 case 0x5a: /* FCVTNU */
11924 case 0x5b: /* FCVTMU */
11925 case 0x5c: /* FCVTAU */
11926 case 0x7a: /* FCVTPU */
11927 case 0x7b: /* FCVTZU */
11928 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
11929 break;
11930 case 0x6f: /* FNEG */
11931 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
11932 break;
11933 case 0x7d: /* FRSQRTE */
11934 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
11935 break;
11936 default:
11937 g_assert_not_reached();
11938 }
11939
11940 /* limit any sign extension going on */
11941 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
11942 write_fp_sreg(s, rd, tcg_res);
11943 } else {
11944 for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
11945 TCGv_i32 tcg_op = tcg_temp_new_i32();
11946 TCGv_i32 tcg_res = tcg_temp_new_i32();
11947
11948 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
11949
11950 switch (fpop) {
11951 case 0x1a: /* FCVTNS */
11952 case 0x1b: /* FCVTMS */
11953 case 0x1c: /* FCVTAS */
11954 case 0x3a: /* FCVTPS */
11955 case 0x3b: /* FCVTZS */
11956 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
11957 break;
11958 case 0x3d: /* FRECPE */
11959 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
11960 break;
11961 case 0x5a: /* FCVTNU */
11962 case 0x5b: /* FCVTMU */
11963 case 0x5c: /* FCVTAU */
11964 case 0x7a: /* FCVTPU */
11965 case 0x7b: /* FCVTZU */
11966 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
11967 break;
11968 case 0x18: /* FRINTN */
11969 case 0x19: /* FRINTM */
11970 case 0x38: /* FRINTP */
11971 case 0x39: /* FRINTZ */
11972 case 0x58: /* FRINTA */
11973 case 0x79: /* FRINTI */
11974 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
11975 break;
11976 case 0x59: /* FRINTX */
11977 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
11978 break;
11979 case 0x2f: /* FABS */
11980 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
11981 break;
11982 case 0x6f: /* FNEG */
11983 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
11984 break;
11985 case 0x7d: /* FRSQRTE */
11986 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
11987 break;
11988 case 0x7f: /* FSQRT */
11989 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
11990 break;
11991 default:
11992 g_assert_not_reached();
11993 }
11994
11995 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11996 }
11997
11998 clear_vec_high(s, is_q, rd);
11999 }
12000
12001 if (tcg_rmode) {
12002 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12003 }
12004 }
12005
12006 /* AdvSIMD scalar x indexed element
12007 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12008 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12009 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12010 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12011 * AdvSIMD vector x indexed element
12012 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12013 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12014 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12015 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12016 */
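/*
 * The element index and the Rm register are assembled from H:L:M
 * according to the element size (see the MemOp switch in the body):
 * for MO_16 the index is H:L:M and only the 4-bit Rm field is usable,
 * for MO_32 the index is H:L and M becomes bit 4 of Rm, and for MO_64
 * the index is just H.
 */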
12017 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12018 {
12019 /* This encoding has two kinds of instruction:
12020 * normal, where we perform elt x idxelt => elt for each
12021 * element in the vector
12022 * long, where we perform elt x idxelt and generate a result of
12023 * double the width of the input element
12024 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
12025 */
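/*
 * E.g. SMULL2 v0.4s, v1.8h, v2.h[3] is a long op: each 16-bit element
 * in the high half of v1 (the "2" part, selected via pass + is_q * 2
 * below) is multiplied by element 3 of v2 to produce 32-bit results.
 */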
12026 bool is_scalar = extract32(insn, 28, 1);
12027 bool is_q = extract32(insn, 30, 1);
12028 bool u = extract32(insn, 29, 1);
12029 int size = extract32(insn, 22, 2);
12030 int l = extract32(insn, 21, 1);
12031 int m = extract32(insn, 20, 1);
12032 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12033 int rm = extract32(insn, 16, 4);
12034 int opcode = extract32(insn, 12, 4);
12035 int h = extract32(insn, 11, 1);
12036 int rn = extract32(insn, 5, 5);
12037 int rd = extract32(insn, 0, 5);
12038 bool is_long = false;
12039 int is_fp = 0;
12040 bool is_fp16 = false;
12041 int index;
12042 TCGv_ptr fpst;
12043
12044 switch (16 * u + opcode) {
12045 case 0x02: /* SMLAL, SMLAL2 */
12046 case 0x12: /* UMLAL, UMLAL2 */
12047 case 0x06: /* SMLSL, SMLSL2 */
12048 case 0x16: /* UMLSL, UMLSL2 */
12049 case 0x0a: /* SMULL, SMULL2 */
12050 case 0x1a: /* UMULL, UMULL2 */
12051 if (is_scalar) {
12052 unallocated_encoding(s);
12053 return;
12054 }
12055 is_long = true;
12056 break;
12057 case 0x03: /* SQDMLAL, SQDMLAL2 */
12058 case 0x07: /* SQDMLSL, SQDMLSL2 */
12059 case 0x0b: /* SQDMULL, SQDMULL2 */
12060 is_long = true;
12061 break;
12062 case 0x1d: /* SQRDMLAH */
12063 case 0x1f: /* SQRDMLSH */
12064 if (!dc_isar_feature(aa64_rdm, s)) {
12065 unallocated_encoding(s);
12066 return;
12067 }
12068 break;
12069 case 0x0e: /* SDOT */
12070 case 0x1e: /* UDOT */
12071 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12072 unallocated_encoding(s);
12073 return;
12074 }
12075 break;
12076 case 0x0f:
12077 switch (size) {
12078 case 0: /* SUDOT */
12079 case 2: /* USDOT */
12080 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
12081 unallocated_encoding(s);
12082 return;
12083 }
12084 size = MO_32;
12085 break;
12086 case 1: /* BFDOT */
12087 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12088 unallocated_encoding(s);
12089 return;
12090 }
12091 size = MO_32;
12092 break;
12093 case 3: /* BFMLAL{B,T} */
12094 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12095 unallocated_encoding(s);
12096 return;
12097 }
12098 /* setting is_fp here would trigger size checks that are wrong for this insn */
12099 size = MO_16;
12100 break;
12101 default:
12102 unallocated_encoding(s);
12103 return;
12104 }
12105 break;
12106 case 0x11: /* FCMLA #0 */
12107 case 0x13: /* FCMLA #90 */
12108 case 0x15: /* FCMLA #180 */
12109 case 0x17: /* FCMLA #270 */
12110 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12111 unallocated_encoding(s);
12112 return;
12113 }
12114 is_fp = 2;
12115 break;
12116 default:
12117 case 0x00: /* FMLAL */
12118 case 0x01: /* FMLA */
12119 case 0x04: /* FMLSL */
12120 case 0x05: /* FMLS */
12121 case 0x08: /* MUL */
12122 case 0x09: /* FMUL */
12123 case 0x0c: /* SQDMULH */
12124 case 0x0d: /* SQRDMULH */
12125 case 0x10: /* MLA */
12126 case 0x14: /* MLS */
12127 case 0x18: /* FMLAL2 */
12128 case 0x19: /* FMULX */
12129 case 0x1c: /* FMLSL2 */
12130 unallocated_encoding(s);
12131 return;
12132 }
12133
12134 switch (is_fp) {
12135 case 1: /* normal fp */
12136 unallocated_encoding(s); /* in decodetree */
12137 return;
12138
12139 case 2: /* complex fp */
12140 /* Each indexable element is a complex pair. */
12141 size += 1;
12142 switch (size) {
12143 case MO_32:
12144 if (h && !is_q) {
12145 unallocated_encoding(s);
12146 return;
12147 }
12148 is_fp16 = true;
12149 break;
12150 case MO_64:
12151 break;
12152 default:
12153 unallocated_encoding(s);
12154 return;
12155 }
12156 break;
12157
12158 default: /* integer */
12159 switch (size) {
12160 case MO_8:
12161 case MO_64:
12162 unallocated_encoding(s);
12163 return;
12164 }
12165 break;
12166 }
12167 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12168 unallocated_encoding(s);
12169 return;
12170 }
12171
12172 /* Given MemOp size, adjust register and indexing. */
12173 switch (size) {
12174 case MO_16:
12175 index = h << 2 | l << 1 | m;
12176 break;
12177 case MO_32:
12178 index = h << 1 | l;
12179 rm |= m << 4;
12180 break;
12181 case MO_64:
12182 if (l || !is_q) {
12183 unallocated_encoding(s);
12184 return;
12185 }
12186 index = h;
12187 rm |= m << 4;
12188 break;
12189 default:
12190 g_assert_not_reached();
12191 }
12192
12193 if (!fp_access_check(s)) {
12194 return;
12195 }
12196
12197 if (is_fp) {
12198 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
12199 } else {
12200 fpst = NULL;
12201 }
12202
12203 switch (16 * u + opcode) {
12204 case 0x0e: /* SDOT */
12205 case 0x1e: /* UDOT */
12206 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12207 u ? gen_helper_gvec_udot_idx_b
12208 : gen_helper_gvec_sdot_idx_b);
12209 return;
12210 case 0x0f:
12211 switch (extract32(insn, 22, 2)) {
12212 case 0: /* SUDOT */
12213 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12214 gen_helper_gvec_sudot_idx_b);
12215 return;
12216 case 1: /* BFDOT */
12217 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12218 gen_helper_gvec_bfdot_idx);
12219 return;
12220 case 2: /* USDOT */
12221 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12222 gen_helper_gvec_usdot_idx_b);
12223 return;
12224 case 3: /* BFMLAL{B,T} */
12225 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
12226 gen_helper_gvec_bfmlal_idx);
12227 return;
12228 }
12229 g_assert_not_reached();
12230 case 0x11: /* FCMLA #0 */
12231 case 0x13: /* FCMLA #90 */
12232 case 0x15: /* FCMLA #180 */
12233 case 0x17: /* FCMLA #270 */
12234 {
12235 int rot = extract32(insn, 13, 2);
12236 int data = (index << 2) | rot;
12237 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
12238 vec_full_reg_offset(s, rn),
12239 vec_full_reg_offset(s, rm),
12240 vec_full_reg_offset(s, rd), fpst,
12241 is_q ? 16 : 8, vec_full_reg_size(s), data,
12242 size == MO_64
12243 ? gen_helper_gvec_fcmlas_idx
12244 : gen_helper_gvec_fcmlah_idx);
12245 }
12246 return;
12247 }
12248
12249 if (size == 3) {
12250 g_assert_not_reached();
12251 } else if (!is_long) {
12252 /* 32 bit floating point, or 16 or 32 bit integer.
12253 * For the 16 bit scalar case we use the usual Neon helpers and
12254 * rely on the fact that 0 op 0 == 0 with no side effects.
12255 */
12256 TCGv_i32 tcg_idx = tcg_temp_new_i32();
12257 int pass, maxpasses;
12258
12259 if (is_scalar) {
12260 maxpasses = 1;
12261 } else {
12262 maxpasses = is_q ? 4 : 2;
12263 }
12264
12265 read_vec_element_i32(s, tcg_idx, rm, index, size);
12266
12267 if (size == 1 && !is_scalar) {
12268 /* The simplest way to handle the 16x16 indexed ops is to duplicate
12269 * the index into both halves of the 32 bit tcg_idx and then use
12270 * the usual Neon helpers.
12271 */
12272 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
12273 }
12274
12275 for (pass = 0; pass < maxpasses; pass++) {
12276 TCGv_i32 tcg_op = tcg_temp_new_i32();
12277 TCGv_i32 tcg_res = tcg_temp_new_i32();
12278
12279 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
12280
12281 switch (16 * u + opcode) {
12282 case 0x10: /* MLA */
12283 case 0x14: /* MLS */
12284 {
12285 static NeonGenTwoOpFn * const fns[2][2] = {
12286 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
12287 { tcg_gen_add_i32, tcg_gen_sub_i32 },
12288 };
12289 NeonGenTwoOpFn *genfn;
12290 bool is_sub = opcode == 0x4;
12291
12292 if (size == 1) {
12293 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
12294 } else {
12295 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
12296 }
12297 if (opcode == 0x8) {
12298 break;
12299 }
12300 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
12301 genfn = fns[size - 1][is_sub];
12302 genfn(tcg_res, tcg_op, tcg_res);
12303 break;
12304 }
12305 case 0x0c: /* SQDMULH */
12306 if (size == 1) {
12307 gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
12308 tcg_op, tcg_idx);
12309 } else {
12310 gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
12311 tcg_op, tcg_idx);
12312 }
12313 break;
12314 case 0x0d: /* SQRDMULH */
12315 if (size == 1) {
12316 gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
12317 tcg_op, tcg_idx);
12318 } else {
12319 gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
12320 tcg_op, tcg_idx);
12321 }
12322 break;
12323 case 0x1d: /* SQRDMLAH */
12324 read_vec_element_i32(s, tcg_res, rd, pass,
12325 is_scalar ? size : MO_32);
12326 if (size == 1) {
12327 gen_helper_neon_qrdmlah_s16(tcg_res, tcg_env,
12328 tcg_op, tcg_idx, tcg_res);
12329 } else {
12330 gen_helper_neon_qrdmlah_s32(tcg_res, tcg_env,
12331 tcg_op, tcg_idx, tcg_res);
12332 }
12333 break;
12334 case 0x1f: /* SQRDMLSH */
12335 read_vec_element_i32(s, tcg_res, rd, pass,
12336 is_scalar ? size : MO_32);
12337 if (size == 1) {
12338 gen_helper_neon_qrdmlsh_s16(tcg_res, tcg_env,
12339 tcg_op, tcg_idx, tcg_res);
12340 } else {
12341 gen_helper_neon_qrdmlsh_s32(tcg_res, tcg_env,
12342 tcg_op, tcg_idx, tcg_res);
12343 }
12344 break;
12345 default:
12346 case 0x01: /* FMLA */
12347 case 0x05: /* FMLS */
12348 case 0x09: /* FMUL */
12349 case 0x19: /* FMULX */
12350 g_assert_not_reached();
12351 }
12352
12353 if (is_scalar) {
12354 write_fp_sreg(s, rd, tcg_res);
12355 } else {
12356 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12357 }
12358 }
12359
12360 clear_vec_high(s, is_q, rd);
12361 } else {
12362 /* long ops: 16x16->32 or 32x32->64 */
12363 TCGv_i64 tcg_res[2];
12364 int pass;
12365 bool satop = extract32(opcode, 0, 1);
12366 MemOp memop = MO_32;
12367
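/*
 * Opcode bit 0 distinguishes the saturating-doubling ops (SQDMLAL 0x3,
 * SQDMLSL 0x7, SQDMULL 0xb) from the plain widening multiplies, which
 * are signed when U is clear.
 */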
12368 if (satop || !u) {
12369 memop |= MO_SIGN;
12370 }
12371
12372 if (size == 2) {
12373 TCGv_i64 tcg_idx = tcg_temp_new_i64();
12374
12375 read_vec_element(s, tcg_idx, rm, index, memop);
12376
12377 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12378 TCGv_i64 tcg_op = tcg_temp_new_i64();
12379 TCGv_i64 tcg_passres;
12380 int passelt;
12381
12382 if (is_scalar) {
12383 passelt = 0;
12384 } else {
12385 passelt = pass + (is_q * 2);
12386 }
12387
12388 read_vec_element(s, tcg_op, rn, passelt, memop);
12389
12390 tcg_res[pass] = tcg_temp_new_i64();
12391
12392 if (opcode == 0xa || opcode == 0xb) {
12393 /* Non-accumulating ops */
12394 tcg_passres = tcg_res[pass];
12395 } else {
12396 tcg_passres = tcg_temp_new_i64();
12397 }
12398
12399 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
12400
12401 if (satop) {
12402 /* saturating, doubling */
12403 gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
12404 tcg_passres, tcg_passres);
12405 }
12406
12407 if (opcode == 0xa || opcode == 0xb) {
12408 continue;
12409 }
12410
12411 /* Accumulating op: handle accumulate step */
12412 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12413
12414 switch (opcode) {
12415 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
12416 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
12417 break;
12418 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
12419 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
12420 break;
12421 case 0x7: /* SQDMLSL, SQDMLSL2 */
12422 tcg_gen_neg_i64(tcg_passres, tcg_passres);
12423 /* fall through */
12424 case 0x3: /* SQDMLAL, SQDMLAL2 */
12425 gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
12426 tcg_res[pass],
12427 tcg_passres);
12428 break;
12429 default:
12430 g_assert_not_reached();
12431 }
12432 }
12433
12434 clear_vec_high(s, !is_scalar, rd);
12435 } else {
12436 TCGv_i32 tcg_idx = tcg_temp_new_i32();
12437
12438 assert(size == 1);
12439 read_vec_element_i32(s, tcg_idx, rm, index, size);
12440
12441 if (!is_scalar) {
12442 /* The simplest way to handle the 16x16 indexed ops is to
12443 * duplicate the index into both halves of the 32 bit tcg_idx
12444 * and then use the usual Neon helpers.
12445 */
12446 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
12447 }
12448
12449 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12450 TCGv_i32 tcg_op = tcg_temp_new_i32();
12451 TCGv_i64 tcg_passres;
12452
12453 if (is_scalar) {
12454 read_vec_element_i32(s, tcg_op, rn, pass, size);
12455 } else {
12456 read_vec_element_i32(s, tcg_op, rn,
12457 pass + (is_q * 2), MO_32);
12458 }
12459
12460 tcg_res[pass] = tcg_temp_new_i64();
12461
12462 if (opcode == 0xa || opcode == 0xb) {
12463 /* Non-accumulating ops */
12464 tcg_passres = tcg_res[pass];
12465 } else {
12466 tcg_passres = tcg_temp_new_i64();
12467 }
12468
12469 if (memop & MO_SIGN) {
12470 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
12471 } else {
12472 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
12473 }
12474 if (satop) {
12475 gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
12476 tcg_passres, tcg_passres);
12477 }
12478
12479 if (opcode == 0xa || opcode == 0xb) {
12480 continue;
12481 }
12482
12483 /* Accumulating op: handle accumulate step */
12484 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12485
12486 switch (opcode) {
12487 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
12488 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
12489 tcg_passres);
12490 break;
12491 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
12492 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
12493 tcg_passres);
12494 break;
12495 case 0x7: /* SQDMLSL, SQDMLSL2 */
12496 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
12497 /* fall through */
12498 case 0x3: /* SQDMLAL, SQDMLAL2 */
12499 gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
12500 tcg_res[pass],
12501 tcg_passres);
12502 break;
12503 default:
12504 g_assert_not_reached();
12505 }
12506 }
12507
12508 if (is_scalar) {
12509 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
12510 }
12511 }
12512
12513 if (is_scalar) {
12514 tcg_res[1] = tcg_constant_i64(0);
12515 }
12516
12517 for (pass = 0; pass < 2; pass++) {
12518 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12519 }
12520 }
12521 }
12522
12523 /* C3.6 Data processing - SIMD, inc Crypto
12524 *
12525 * As the decode gets a little complex we are using a table based
12526 * approach for this part of the decode.
12527 */
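/*
 * An entry matches when (insn & mask) == pattern; lookup_disas_fn
 * scans the table in order until the all-zero sentinel, so more
 * specific patterns must appear before broader ones they overlap.
 */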
12528 static const AArch64DecodeTable data_proc_simd[] = {
12529 /* pattern , mask , fn */
12530 { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
12531 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
12532 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
12533 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
12534 { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
12535 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
12536 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
12537 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
12538 { 0x0e000000, 0xbf208c00, disas_simd_tb },
12539 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
12540 { 0x2e000000, 0xbf208400, disas_simd_ext },
12541 { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
12542 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
12543 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
12544 { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
12545 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
12546 { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
12547 { 0x00000000, 0x00000000, NULL }
12548 };
12549
12550 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
12551 {
12552 /* Note that this is called with all non-FP cases from
12553 * table C3-6 so it must UNDEF for entries not specifically
12554 * allocated to instructions in that table.
12555 */
12556 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
12557 if (fn) {
12558 fn(s, insn);
12559 } else {
12560 unallocated_encoding(s);
12561 }
12562 }
12563
12564 /* C3.6 Data processing - SIMD and floating point */
12565 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
12566 {
12567 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
12568 disas_data_proc_fp(s, insn);
12569 } else {
12570 /* SIMD, including crypto */
12571 disas_data_proc_simd(s, insn);
12572 }
12573 }
12574
12575 static bool trans_OK(DisasContext *s, arg_OK *a)
12576 {
12577 return true;
12578 }
12579
12580 static bool trans_FAIL(DisasContext *s, arg_OK *a)
12581 {
12582 s->is_nonstreaming = true;
12583 return true;
12584 }
12585
12586 /**
12587 * is_guarded_page:
12588 * @env: The cpu environment
12589 * @s: The DisasContext
12590 *
12591 * Return true if the page is guarded.
12592 */
12593 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
12594 {
12595 uint64_t addr = s->base.pc_first;
12596 #ifdef CONFIG_USER_ONLY
12597 return page_get_flags(addr) & PAGE_BTI;
12598 #else
12599 CPUTLBEntryFull *full;
12600 void *host;
12601 int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
12602 int flags;
12603
12604 /*
12605 * We test this immediately after reading an insn, which means
12606 * that the TLB entry must be present and valid, and thus this
12607 * access will never raise an exception.
12608 */
12609 flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
12610 false, &host, &full, 0);
12611 assert(!(flags & TLB_INVALID_MASK));
12612
12613 return full->extra.arm.guarded;
12614 #endif
12615 }
12616
12617 /**
12618 * btype_destination_ok:
12619 * @insn: The instruction at the branch destination
12620 * @bt: SCTLR_ELx.BT
12621 * @btype: PSTATE.BTYPE, and is non-zero
12622 *
12623 * On a guarded page, there are a limited number of insns
12624 * that may be present at the branch target:
12625 * - branch target identifiers,
12626 * - paciasp, pacibsp,
12627 * - BRK insn
12628 * - HLT insn
12629 * Anything else causes a Branch Target Exception.
12630 *
12631 * Return true if the branch is compatible, false to raise BTITRAP.
12632 */
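/*
 * Worked encoding example: BTI c assembles to 0xd503245f, i.e. the
 * HINT space value 0xd503201f with imm7 == 0b0100010 in bits [11:5],
 * matching the case below.
 */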
12633 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
12634 {
12635 if ((insn & 0xfffff01fu) == 0xd503201fu) {
12636 /* HINT space */
12637 switch (extract32(insn, 5, 7)) {
12638 case 0b011001: /* PACIASP */
12639 case 0b011011: /* PACIBSP */
12640 /*
12641 * If SCTLR_ELx.BT, then PACI*SP are not compatible
12642 * with btype == 3. Otherwise all btype are ok.
12643 */
12644 return !bt || btype != 3;
12645 case 0b100000: /* BTI */
12646 /* Not compatible with any btype. */
12647 return false;
12648 case 0b100010: /* BTI c */
12649 /* Not compatible with btype == 3 */
12650 return btype != 3;
12651 case 0b100100: /* BTI j */
12652 /* Not compatible with btype == 2 */
12653 return btype != 2;
12654 case 0b100110: /* BTI jc */
12655 /* Compatible with any btype. */
12656 return true;
12657 }
12658 } else {
12659 switch (insn & 0xffe0001fu) {
12660 case 0xd4200000u: /* BRK */
12661 case 0xd4400000u: /* HLT */
12662 /* Give priority to the breakpoint exception. */
12663 return true;
12664 }
12665 }
12666 return false;
12667 }
12668
12669 /* C3.1 A64 instruction index by encoding */
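/*
 * Only the remaining legacy classes are decoded here, keyed on
 * bits [28:25]: 0x5/0xd are data processing (register) and 0x7/0xf
 * are SIMD/FP. Everything else is expected to have been claimed by
 * the generated decodetree decoder, so it UNDEFs here.
 */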
12670 static void disas_a64_legacy(DisasContext *s, uint32_t insn)
12671 {
12672 switch (extract32(insn, 25, 4)) {
12673 case 0x5:
12674 case 0xd: /* Data processing - register */
12675 disas_data_proc_reg(s, insn);
12676 break;
12677 case 0x7:
12678 case 0xf: /* Data processing - SIMD and floating point */
12679 disas_data_proc_simd_fp(s, insn);
12680 break;
12681 default:
12682 unallocated_encoding(s);
12683 break;
12684 }
12685 }
12686
12687 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
12688 CPUState *cpu)
12689 {
12690 DisasContext *dc = container_of(dcbase, DisasContext, base);
12691 CPUARMState *env = cpu_env(cpu);
12692 ARMCPU *arm_cpu = env_archcpu(env);
12693 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
12694 int bound, core_mmu_idx;
12695
12696 dc->isar = &arm_cpu->isar;
12697 dc->condjmp = 0;
12698 dc->pc_save = dc->base.pc_first;
12699 dc->aarch64 = true;
12700 dc->thumb = false;
12701 dc->sctlr_b = 0;
12702 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
12703 dc->condexec_mask = 0;
12704 dc->condexec_cond = 0;
12705 core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
12706 dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
12707 dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
12708 dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
12709 dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
12710 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
12711 #if !defined(CONFIG_USER_ONLY)
12712 dc->user = (dc->current_el == 0);
12713 #endif
12714 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
12715 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
12716 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
12717 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
12718 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
12719 dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
12720 dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
12721 dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
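/* VL and SVL are stored scaled: field value N means (N + 1) * 16
 * bytes, so e.g. 0 means 128-bit vectors. */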
12722 dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
12723 dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
12724 dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
12725 dc->bt = EX_TBFLAG_A64(tb_flags, BT);
12726 dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
12727 dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
12728 dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
12729 dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
12730 dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
12731 dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
12732 dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
12733 dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
12734 dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
12735 dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
12736 dc->nv = EX_TBFLAG_A64(tb_flags, NV);
12737 dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1);
12738 dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
12739 dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
12740 dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
12741 dc->vec_len = 0;
12742 dc->vec_stride = 0;
12743 dc->cp_regs = arm_cpu->cp_regs;
12744 dc->features = env->features;
12745 dc->dcz_blocksize = arm_cpu->dcz_blocksize;
12746 dc->gm_blocksize = arm_cpu->gm_blocksize;
12747
12748 #ifdef CONFIG_USER_ONLY
12749 /* In sve_probe_page, we assume TBI is enabled. */
12750 tcg_debug_assert(dc->tbid & 1);
12751 #endif
12752
12753 dc->lse2 = dc_isar_feature(aa64_lse2, dc);
12754
12755 /* Single step state. The code-generation logic here is:
12756 * SS_ACTIVE == 0:
12757 * generate code with no special handling for single-stepping (except
12758 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12759 * this happens anyway because those changes are all system register or
12760 * PSTATE writes).
12761 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12762 * emit code for one insn
12763 * emit code to clear PSTATE.SS
12764 * emit code to generate software step exception for completed step
12765 * end TB (as usual for having generated an exception)
12766 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12767 * emit code to generate a software step exception
12768 * end the TB
12769 */
12770 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
12771 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
12772 dc->is_ldex = false;
12773
12774 /* Bound the number of insns to execute to those left on the page. */
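/* Since TARGET_PAGE_MASK is negative, -(pc | TARGET_PAGE_MASK) is the
 * byte count from pc to the end of its page: e.g. a pc 16 bytes below
 * a page boundary bounds the TB to 4 insns.
 */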
12775 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
12776
12777 /* If architectural single step active, limit to 1. */
12778 if (dc->ss_active) {
12779 bound = 1;
12780 }
12781 dc->base.max_insns = MIN(dc->base.max_insns, bound);
12782 }
12783
12784 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
12785 {
12786 }
12787
12788 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12789 {
12790 DisasContext *dc = container_of(dcbase, DisasContext, base);
12791 target_ulong pc_arg = dc->base.pc_next;
12792
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start_updated = false;
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Single-step exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /*
         * Single-step state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (e.g. by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault. This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
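    /* Every A64 instruction is 4 bytes wide. */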
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above. This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* The first insn may have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above almost
             * everything else. This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

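    /*
     * In streaming SVE mode without FA64, scan the insn against the
     * FA64 decode table; insns in that set are marked non-streaming
     * so that the access-check path can raise the SME trap.
     */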
    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

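    /*
     * Try the decodetree-generated decoders first; anything not yet
     * converted falls through to the legacy hand-written decoder.
     */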
    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /*
         * Note that this means single-stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code,
         * as gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when single-stepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
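            /* Chain directly to the TB for the next insn, 4 bytes on. */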
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(tcg_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
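            /* The helper argument is the insn length, used if the WFI traps. */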
            gen_helper_wfi(tcg_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start = aarch64_tr_tb_start,
    .insn_start = aarch64_tr_insn_start,
    .translate_insn = aarch64_tr_translate_insn,
    .tb_stop = aarch64_tr_tb_stop,
};