/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "hw/semihosting/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"

#include "trace-tcg.h"
#include "translate-a64.h"
#include "qemu/atomic128.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
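
/*
 * Illustrative sketch only (the names here are hypothetical, not one of
 * the tables used later in this file): an entry matches when
 * (insn & mask) == pattern, and the table ends with a zero-mask
 * terminator, so a group table might look like:
 *
 *   static const AArch64DecodeTable example_table[] = {
 *       { 0x0e200400, 0x9f200400, disas_example },
 *       { 0x00000000, 0x00000000, NULL }
 *   };
 */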

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
        tcg_temp_free_i32(zero);
        s->btype = 0;
    }
}

static void set_btype(DisasContext *s, int val)
{
    TCGv_i32 tcg_val;

    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);

    tcg_val = tcg_const_i32(val);
    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
    tcg_temp_free_i32(tcg_val);
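    /*
     * -1 is deliberate: this tracking field only needs to record that
     * BTYPE is now known to be nonzero, so that a later reset_btype()
     * will emit the store of zero; the exact value is not needed at
     * translation time.
     */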
    s->btype = -1;
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55.  */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        if (tbi != 3) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            /*
             * The two TBI bits differ.
             * If tbi0, then !tbi1: only use the extension if positive.
             * If !tbi0, then tbi1: only use the extension if negative.
             */
            tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
                                dst, dst, tcg_zero, dst, src);
            tcg_temp_free_i64(tcg_zero);
        }
    }
}
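
/*
 * Worked example of the above (illustrative values only): with tbi == 2
 * (TBI1 set, TBI0 clear) and src == 0xaa80123456789abc, bit 55 is 1, so
 * the sextract leaves dst == 0xff80123456789abc, the tag byte having
 * been replaced by the sign extension; the TCG_COND_LT movcond keeps
 * that result because it is negative.  Had bit 55 been 0, the movcond
 * would have restored the unmodified src.
 */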

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}

/*
 * Return a "clean" address for ADDR according to TBID.
 * This is always a fresh temporary, as we need to be able to
 * increment this independently of a dirty write-back address.
 */
static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
    /*
     * In order to get the correct value in the FAR_ELx register,
     * we must present the memory subsystem with the "dirty" address
     * including the TBI.  In system mode we can make this work via
     * the TLB, dropping the TBI during translation.  But for user-only
     * mode we don't have that option, and must remove the top byte now.
     */
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.  */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(pc);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If
 * you need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In the instruction register encoding, 31 can refer to ZR (zero register)
 * or the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
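
/*
 * Illustrative example (architectural behaviour, not code in this
 * file): for "ADD X0, SP, #16" a register field of 31 means SP, so a
 * decoder must go through cpu_reg_sp(); for "ADD X0, XZR, X1" the same
 * field value means the zero register, and cpu_reg() hands back a
 * zeroed temporary instead.
 */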

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand AdvSIMD vector operation using an op descriptor.  */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}

/* Expand a 3-operand operation using an out-of-line helper.  */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + env pointer operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
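
/*
 * A note on the trick above: extr splits the 64-bit result so that
 * NF holds the high half (its bit 31 is the result's sign bit) and
 * ZF holds the low half; OR-ing the halves into ZF makes ZF zero
 * exactly when the whole 64-bit result is zero, matching QEMU's
 * convention that cpu_ZF is zero iff the Z flag is set.
 */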

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
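        /*
         * tcg_gen_add2 performs a double-word add; with the high input
         * words zeroed, the high output word is the carry out.  The two
         * calls below therefore compute t0 + CF and then add t1,
         * accumulating the final carry in cf_64.
         */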
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    MemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        MemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
    return false;
}

/* Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
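
/*
 * Example for the helper above (values illustrative): option == 0b010
 * with shift == 2 encodes UXTW #2, so the low 32 bits of tcg_in are
 * zero-extended and then shifted left by 2, the scaling applied when a
 * Wn index register is added to a 64-bit base address.
 */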

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-byte aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0).
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
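
/*
 * Typical dispatch sketch for callers (the table name here is
 * hypothetical; the real tables appear further down this file):
 *
 *   AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *   if (fn) {
 *       fn(s, insn);
 *   } else {
 *       unallocated_encoding(s);
 *   }
 */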

/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}

/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                                new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                                new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                              new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                              new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}

static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGv_i32 t1;
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        t1 = tcg_const_i32(crm & PSTATE_SP);
        gen_helper_msr_i_spsel(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1e: /* DAIFSet */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifset(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1f: /* DAIFClear */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifclear(cpu_env, t1);
        tcg_temp_free_i32(t1);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        s->base.is_jmp = DISAS_UPDATE;
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
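
/*
 * Worked example (illustrative only, not exercised by anything in this
 * file): with N=1, Z=0, C=1, V=0 the deposits above produce
 *     (1 << 31) | (0 << 30) | (1 << 29) | (0 << 28) = 0xa0000000,
 * i.e. the NZCV field layout of the architectural register,
 * zero-extended to 64 bits for Rt.
 */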

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}

/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc_curr);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
        tcg_temp_free_i32(tcg_el);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}

/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
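/*
 * Decode sketch (illustrative, not part of the decoder itself): the
 * canonical NOP encoding 0xd503201f has L=0, op0=0, op1=3, CRn=2,
 * CRm=0, op2=0, Rt=31 under this layout, so it is routed below to
 * handle_hint() with hint selector 0.
 */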
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}

/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16), default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
                               syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc_curr);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
                               syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this
         * as required when halting debug is disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;   /* 0: BR, 1: BLR, 2: other */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unsupported_encoding(s, insn);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above.  */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}

/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            if (extract32(insn, 22, 2) == 0) {
                disas_system(s, insn);
            } else {
                unallocated_encoding(s);
            }
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
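/*
 * As an illustrative sketch (guest assembly, not anything QEMU emits),
 * a typical guest retry loop such as
 *
 *     retry:
 *         ldxr    x1, [x0]
 *         add     x1, x1, #1
 *         stxr    w2, x1, [x0]
 *         cbnz    w2, retry
 *
 * works under this scheme: the store-exclusive cmpxchg succeeds only
 * while memory still holds the value recorded by the load-exclusive,
 * even though no architectural exclusive monitor is modelled.
 */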
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned.  */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                s->base.is_jmp = DISAS_NORETURN;
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}

static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_const_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
            tcg_temp_free_i32(tcg_rs);
        } else {
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data.  */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}

/* Update the Sixty-Four bit (SF) register size. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}
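
/*
 * For instance (a worked example of the decode above): LDRSW is signed
 * with opc<0> == 0, giving regsize 64 and so SF = 1, while an unsigned
 * LDRH (size == 1) gives regsize 32 and SF = 0.
 */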

/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
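/*
 * Decode sketch (illustrative): LDXR w0, [x0] is 0x885f7c00, i.e.
 * sz=10, L=1, o2=o1=o0=0, so o2_L_o1_o0 below evaluates to 0x4.
 */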
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}

/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10 -> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    clean_addr = tcg_const_i64(s->pc_curr + imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(clean_addr);
}
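
/*
 * Offset example (illustrative): LDR x0, <label> with the label 8 bytes
 * past the instruction encodes imm19 = 2, since the offset above is
 * sextract32(insn, 5, 19) << 2, added to s->pc_curr.
 */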

/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
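
/*
 * Scaling example (illustrative): for STP x0, x1, [sp, #16] the size
 * is 3 (64-bit), so the 16-byte offset is encoded as imm7 = 2 and
 * recovered by the "offset <<= size" above.
 */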

/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}

/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
3051 
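/*
 * Worked example (illustrative only, not an exhaustive decode): for
 * "ldr x0, [x1, w2, sxtw #3]" we have size = 3, opt = 0b110 (SXTW)
 * and S = 1, so ext_and_shift_reg() above produces
 *
 *     offset = SignExtend64(W2) << 3;
 *
 * and the effective address is X1 + offset.  With S = 0 the same
 * instruction form uses a shift amount of 0.
 */
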
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> load (zero-extended),
 *        10 -> signed load into 64-bit reg, 11 -> signed load into 32-bit reg
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}

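/*
 * Worked example (illustrative only): "ldr w0, [x1, #4092]" encodes
 * size = 2 and imm12 = 0x3ff, so offset = imm12 << size = 4092.  The
 * unsigned-immediate form can therefore only reach size-aligned
 * offsets; anything else must use the unscaled LDUR/STUR encodings.
 */
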
/* Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, clean_addr;
    AtomicThreeOpFn *fn;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirements here are weaker than
         * full load-acquire (we only need "load-acquire processor consistent"),
         * but we choose to implement them as full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}

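/*
 * A minimal host-side analogy for the LDADD path above (a sketch, not
 * the emulation path; it assumes A = R = 0 and a host with C11 atomics,
 * and ldadd64_sketch is not a QEMU function):
 *
 *     #include <stdatomic.h>
 *     #include <stdint.h>
 *
 *     static uint64_t ldadd64_sketch(_Atomic uint64_t *mem, uint64_t rs)
 *     {
 *         // Returns the old memory value, like Rt after LDADD.
 *         return atomic_fetch_add_explicit(mem, rs, memory_order_relaxed);
 *     }
 *
 * Note that the o3_opc case labels above are octal literals, mirroring
 * the o3:opc field values in the encoding diagram.
 */
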
/*
 * PAC memory operations
 *
 *  31  30      27  26    24    22  21       12  11  10    5     0
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * V: vector flag (always 0 as of v8.3)
 * M: clear for key DA, set for key DB
 * W: pre-indexing flag
 * S: sign for imm9.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
        }
    }

    /* Form the 10-bit signed, scaled offset.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
    clean_addr = clean_data_tbi(s, dirty_addr);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}

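/*
 * Worked example (illustrative only): for LDRAA/LDRAB, size is always 3,
 * so with S = 1 (insn bit 22) and imm9 = 0 the computation above gives
 *
 *     offset = sextract32(0x200 << 3, 0, 13) = -4096
 *
 * i.e. the reachable offsets are -4096..4088 in steps of 8.
 */
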
/*
 * LDAPR/STLR (unscaled immediate)
 *
 *  31  30            24    22  21       12    10    5     0
 * +------+-------------+-----+---+--------+-----+----+-----+
 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
 * +------+-------------+-----+---+--------+-----+----+-----+
 *
 * Rt: source or destination register
 * Rn: base register
 * imm9: unscaled immediate offset
 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
 * size: size of load/store
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool is_signed = false;
    bool extend = false;
    bool iss_sf;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* STLURB */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        is_signed = true;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_signed = true;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
                  true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}

/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 0) {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
            return;
        }
        switch (extract32(insn, 10, 2)) {
        case 0:
            disas_ldst_atomic(s, insn, size, rt, is_vector);
            return;
        case 2:
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
            return;
        default:
            disas_ldst_pac(s, insn, size, rt, is_vector);
            return;
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        return;
    }
    unallocated_encoding(s);
}

/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian = s->be_data;

    int ebytes;   /* bytes per element */
    int elements; /* elements per vector */
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    if (size == 0) {
        endian = MO_LE;
    }

    /* Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    if (selem == 1 && endian == MO_LE) {
        size = 3;
    }
    ebytes = 1 << size;
    elements = (is_q ? 16 : 8) / ebytes;

    tcg_rn = cpu_reg_sp(s, rn);
    clean_addr = clean_data_tbi(s, tcg_rn);
    tcg_ebytes = tcg_const_i64(ebytes);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, size, endian);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, size, endian);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (!is_store) {
        /* For non-quad operations, setting a slice of the low
         * 64 bits of the register clears the high 64 bits (in
         * the ARM ARM pseudocode this is implicit in the fact
         * that 'rval' is a 64 bit wide variable).
         * For quad operations, we might still need to zero the
         * high bits of the SVE register.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}

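/*
 * Worked example (illustrative only): "ld4 {v0.4s-v3.4s}, [x0]" has
 * opcode = 0 (rpt = 1, selem = 4), size = 2 and Q = 1, so ebytes = 4
 * and elements = 4.  The loops above then perform 16 element-sized
 * loads, interleaving registers v0..v3 for each of the 4 elements,
 * for a total of rpt * elements * selem * ebytes = 64 bytes.
 */
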
/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    clean_addr = clean_data_tbi(s, tcg_rn);
    tcg_ebytes = tcg_const_i64(ebytes);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
                                get_mem_index(s), s->be_data + scale);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
            } else {
                do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}

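/*
 * Worked example (illustrative only): "ld1 {v5.s}[3], [x2]" encodes
 * opc = 0b100 (scale = 2, selem = 1), Q = 1, S = 1, size = 0b00.
 * index starts as Q:S:size = 0b1100 and is shifted right by 2 in the
 * scale == 2 case above, giving lane 3; ebytes = 1 << scale = 4.
 */
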
/* Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    case 0x19: /* LDAPR/STLR (unscaled immediate) */
        if (extract32(insn, 10, 2) != 0 ||
            extract32(insn, 21, 1) != 0) {
            unallocated_encoding(s);
            break;
        }
        disas_ldst_ldapr_stlr(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    uint64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = sextract64(insn, 5, 19);
    offset = offset << 2 | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc_curr;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}

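/*
 * Worked example (illustrative only): for ADRP the result is
 *
 *     Xd = (PC & ~0xfff) + SignExtend64(immhi:immlo) * 4096;
 *
 * so an ADRP at 0x400abc with an immediate of 1 writes 0x401000,
 * independent of the low 12 bits of the PC.
 */
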
/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}

/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0b11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}

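/*
 * Worked example (illustrative only): immn = 0, imms = 0b111100,
 * immr = 0 decodes as
 *
 *     len  = 31 - clz32(~0b111100 & 0x3f) = 31 - clz32(0b000011) = 1
 *     e    = 2, levels = 1, s = 0, r = 0
 *     mask = bitmask64(1) = 1, replicated in 2-bit elements
 *
 * which yields the pattern 0x5555555555555555.
 */
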
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Move wide (immediate)
 *
 *  31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |  imm16         |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK
 * hw: shift/16 (0 or 16; 32 and 48 are valid for sf=1 only)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

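/*
 * Worked example (illustrative only): compilers typically synthesize a
 * 64-bit constant as a MOVZ/MOVK chain, which the code above models as
 * one movi followed by 16-bit deposits:
 *
 *     movz x0, #0xdef0              -> X0 = 0x000000000000def0
 *     movk x0, #0x9abc, lsl #16     -> X0 = 0x000000009abcdef0
 *     movk x0, #0x5678, lsl #32     -> X0 = 0x000056789abcdef0
 *     movk x0, #0x1234, lsl #48     -> X0 = 0x123456789abcdef0
 */
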
/* Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
       to be smaller than bitsize, we'll never reference data outside the
       low 32-bits anyway.  */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
           the balance of the word.  Let the deposit below insert all
           of those sign bits.  */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
           any bits outside bitsize, therefore the zero-extension
           below is unneeded.  */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

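/*
 * Worked example (illustrative only): "ubfx w0, w1, #5, #10" is
 * UBFM with ri = 5 and si = 14, so si >= ri, len = 10, and the
 * extraction above becomes tcg_gen_extract_i64(rd, rn, 5, 10).
 * The aliases LSR, LSL, SXTB, UXTH etc. all reduce to the same
 * extract/deposit forms.
 */
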
/* Extract
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* Specialization to ROR happens in EXTRACT2.  */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}

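/*
 * Worked example (illustrative only): "ror x0, x1, #12" is the alias
 * of "extr x0, x1, x1, #12".  For sf = 1 the extract2 op above computes
 * (X1:X1) >> 12 truncated to 64 bits, which is exactly a rotate; the
 * 32-bit path special-cases rm == rn with rotri for the same reason.
 */
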
/* Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}

4608 /* Add/subtract (with carry)
4609  *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
4610  * +--+--+--+------------------------+------+-------------+------+-----+
4611  * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
4612  * +--+--+--+------------------------+------+-------------+------+-----+
4613  */
4614 
4615 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4616 {
4617     unsigned int sf, op, setflags, rm, rn, rd;
4618     TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4619 
4620     sf = extract32(insn, 31, 1);
4621     op = extract32(insn, 30, 1);
4622     setflags = extract32(insn, 29, 1);
4623     rm = extract32(insn, 16, 5);
4624     rn = extract32(insn, 5, 5);
4625     rd = extract32(insn, 0, 5);
4626 
4627     tcg_rd = cpu_reg(s, rd);
4628     tcg_rn = cpu_reg(s, rn);
4629 
4630     if (op) {
4631         tcg_y = new_tmp_a64(s);
4632         tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4633     } else {
4634         tcg_y = cpu_reg(s, rm);
4635     }
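    /*
     * Added note: inverting Rm turns SBC into the architected
     * Rn + NOT(Rm) + C, which equals Rn - Rm - (1 - C), so ADC and
     * SBC can share the same carry-in code path below.
     */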
4636 
4637     if (setflags) {
4638         gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4639     } else {
4640         gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4641     }
4642 }
4643 
4644 /*
4645  * Rotate right into flags
4646  *  31 30 29                21       15          10      5  4      0
4647  * +--+--+--+-----------------+--------+-----------+------+--+------+
4648  * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
4649  * +--+--+--+-----------------+--------+-----------+------+--+------+
4650  */
4651 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
4652 {
4653     int mask = extract32(insn, 0, 4);
4654     int o2 = extract32(insn, 4, 1);
4655     int rn = extract32(insn, 5, 5);
4656     int imm6 = extract32(insn, 15, 6);
4657     int sf_op_s = extract32(insn, 29, 3);
4658     TCGv_i64 tcg_rn;
4659     TCGv_i32 nzcv;
4660 
4661     if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
4662         unallocated_encoding(s);
4663         return;
4664     }
4665 
4666     tcg_rn = read_cpu_reg(s, rn, 1);
4667     tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
4668 
4669     nzcv = tcg_temp_new_i32();
4670     tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
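    /*
     * Added note: QEMU keeps the flags in computed form -- NF and VF
     * are significant in bit 31, ZF is zero exactly when Z is set,
     * and CF holds 0 or 1 -- so each of the rotated N,Z,C,V bits
     * (now in bits 3..0 of nzcv) is converted below.
     */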
4671 
4672     if (mask & 8) { /* N */
4673         tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
4674     }
4675     if (mask & 4) { /* Z */
4676         tcg_gen_not_i32(cpu_ZF, nzcv);
4677         tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
4678     }
4679     if (mask & 2) { /* C */
4680         tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
4681     }
4682     if (mask & 1) { /* V */
4683         tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
4684     }
4685 
4686     tcg_temp_free_i32(nzcv);
4687 }
4688 
4689 /*
4690  * Evaluate into flags
4691  *  31 30 29                21        15   14        10      5  4      0
4692  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4693  * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
4694  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4695  */
4696 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
4697 {
4698     int o3_mask = extract32(insn, 0, 5);
4699     int rn = extract32(insn, 5, 5);
4700     int o2 = extract32(insn, 15, 6);
4701     int sz = extract32(insn, 14, 1);
4702     int sf_op_s = extract32(insn, 29, 3);
4703     TCGv_i32 tmp;
4704     int shift;
4705 
4706     if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
4707         !dc_isar_feature(aa64_condm_4, s)) {
4708         unallocated_encoding(s);
4709         return;
4710     }
4711     shift = sz ? 16 : 24;  /* SETF16 or SETF8 */
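    /*
     * Worked example (added): for SETF8, shift == 24, so NF = X << 24
     * puts X<7> in bit 31 (the N flag), ZF == 0 exactly when
     * X<7:0> == 0, and VF's bit 31 ends up as X<8> ^ X<7>, the
     * architected overflow bit.  C is left unchanged.
     */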
4712 
4713     tmp = tcg_temp_new_i32();
4714     tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
4715     tcg_gen_shli_i32(cpu_NF, tmp, shift);
4716     tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
4717     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
4718     tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
4719     tcg_temp_free_i32(tmp);
4720 }
4721 
4722 /* Conditional compare (immediate / register)
4723  *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
4724  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4725  * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
4726  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4727  *        [1]                             y                [0]       [0]
4728  */
4729 static void disas_cc(DisasContext *s, uint32_t insn)
4730 {
4731     unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4732     TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4733     TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4734     DisasCompare c;
4735 
4736     if (!extract32(insn, 29, 1)) {
4737         unallocated_encoding(s);
4738         return;
4739     }
4740     if (insn & (1 << 10 | 1 << 4)) {
4741         unallocated_encoding(s);
4742         return;
4743     }
4744     sf = extract32(insn, 31, 1);
4745     op = extract32(insn, 30, 1);
4746     is_imm = extract32(insn, 11, 1);
4747     y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
4748     cond = extract32(insn, 12, 4);
4749     rn = extract32(insn, 5, 5);
4750     nzcv = extract32(insn, 0, 4);
4751 
4752     /* Set T0 = !COND.  */
4753     tcg_t0 = tcg_temp_new_i32();
4754     arm_test_cc(&c, cond);
4755     tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
4756     arm_free_cc(&c);
4757 
4758     /* Load the arguments for the new comparison.  */
4759     if (is_imm) {
4760         tcg_y = new_tmp_a64(s);
4761         tcg_gen_movi_i64(tcg_y, y);
4762     } else {
4763         tcg_y = cpu_reg(s, y);
4764     }
4765     tcg_rn = cpu_reg(s, rn);
4766 
4767     /* Set the flags for the new comparison.  */
4768     tcg_tmp = tcg_temp_new_i64();
4769     if (op) {
4770         gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4771     } else {
4772         gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4773     }
4774     tcg_temp_free_i64(tcg_tmp);
4775 
4776     /* If COND was false, force the flags to #nzcv.  Compute two masks
4777      * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
4778      * For tcg hosts that support ANDC, we can make do with just T1.
4779      * In either case, allow the tcg optimizer to delete any unused mask.
4780      */
4781     tcg_t1 = tcg_temp_new_i32();
4782     tcg_t2 = tcg_temp_new_i32();
4783     tcg_gen_neg_i32(tcg_t1, tcg_t0);
4784     tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
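    /*
     * Added example: if COND is false, T0 == 1, so T1 == -1 and
     * T2 == 0; OR-ing with T1 then forces a flag to "set", while
     * AND-ing with T2 (or ANDC with T1) forces it clear.  If COND is
     * true, both masks are identities and the flags computed by the
     * compare above survive.
     */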
4785 
4786     if (nzcv & 8) { /* N */
4787         tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
4788     } else {
4789         if (TCG_TARGET_HAS_andc_i32) {
4790             tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
4791         } else {
4792             tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
4793         }
4794     }
4795     if (nzcv & 4) { /* Z */
4796         if (TCG_TARGET_HAS_andc_i32) {
4797             tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
4798         } else {
4799             tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
4800         }
4801     } else {
4802         tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4803     }
4804     if (nzcv & 2) { /* C */
4805         tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4806     } else {
4807         if (TCG_TARGET_HAS_andc_i32) {
4808             tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4809         } else {
4810             tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4811         }
4812     }
4813     if (nzcv & 1) { /* V */
4814         tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4815     } else {
4816         if (TCG_TARGET_HAS_andc_i32) {
4817             tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4818         } else {
4819             tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4820         }
4821     }
4822     tcg_temp_free_i32(tcg_t0);
4823     tcg_temp_free_i32(tcg_t1);
4824     tcg_temp_free_i32(tcg_t2);
4825 }
4826 
4827 /* Conditional select
4828  *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
4829  * +----+----+---+-----------------+------+------+-----+------+------+
4830  * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
4831  * +----+----+---+-----------------+------+------+-----+------+------+
4832  */
4833 static void disas_cond_select(DisasContext *s, uint32_t insn)
4834 {
4835     unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4836     TCGv_i64 tcg_rd, zero;
4837     DisasCompare64 c;
4838 
4839     if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4840         /* S == 1 or op2<1> == 1 */
4841         unallocated_encoding(s);
4842         return;
4843     }
4844     sf = extract32(insn, 31, 1);
4845     else_inv = extract32(insn, 30, 1);
4846     rm = extract32(insn, 16, 5);
4847     cond = extract32(insn, 12, 4);
4848     else_inc = extract32(insn, 10, 1);
4849     rn = extract32(insn, 5, 5);
4850     rd = extract32(insn, 0, 5);
4851 
4852     tcg_rd = cpu_reg(s, rd);
4853 
4854     a64_test_cc(&c, cond);
4855     zero = tcg_const_i64(0);
4856 
4857     if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4858         /* CSET & CSETM.  */
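        /*
         * Added note: these are the CSINC/CSINV aliases with both
         * sources XZR and the condition inverted, so the result is
         * simply 0 or 1 (0 or -1 for CSETM) and can be computed
         * directly with a setcond.
         */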
4859         tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4860         if (else_inv) {
4861             tcg_gen_neg_i64(tcg_rd, tcg_rd);
4862         }
4863     } else {
4864         TCGv_i64 t_true = cpu_reg(s, rn);
4865         TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4866         if (else_inv && else_inc) {
4867             tcg_gen_neg_i64(t_false, t_false);
4868         } else if (else_inv) {
4869             tcg_gen_not_i64(t_false, t_false);
4870         } else if (else_inc) {
4871             tcg_gen_addi_i64(t_false, t_false, 1);
4872         }
4873         tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4874     }
4875 
4876     tcg_temp_free_i64(zero);
4877     a64_free_cc(&c);
4878 
4879     if (!sf) {
4880         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4881     }
4882 }
4883 
4884 static void handle_clz(DisasContext *s, unsigned int sf,
4885                        unsigned int rn, unsigned int rd)
4886 {
4887     TCGv_i64 tcg_rd, tcg_rn;
4888     tcg_rd = cpu_reg(s, rd);
4889     tcg_rn = cpu_reg(s, rn);
4890 
4891     if (sf) {
4892         tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4893     } else {
4894         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4895         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4896         tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4897         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4898         tcg_temp_free_i32(tcg_tmp32);
4899     }
4900 }
4901 
4902 static void handle_cls(DisasContext *s, unsigned int sf,
4903                        unsigned int rn, unsigned int rd)
4904 {
4905     TCGv_i64 tcg_rd, tcg_rn;
4906     tcg_rd = cpu_reg(s, rd);
4907     tcg_rn = cpu_reg(s, rn);
4908 
4909     if (sf) {
4910         tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4911     } else {
4912         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4913         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4914         tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4915         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4916         tcg_temp_free_i32(tcg_tmp32);
4917     }
4918 }
4919 
4920 static void handle_rbit(DisasContext *s, unsigned int sf,
4921                         unsigned int rn, unsigned int rd)
4922 {
4923     TCGv_i64 tcg_rd, tcg_rn;
4924     tcg_rd = cpu_reg(s, rd);
4925     tcg_rn = cpu_reg(s, rn);
4926 
4927     if (sf) {
4928         gen_helper_rbit64(tcg_rd, tcg_rn);
4929     } else {
4930         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4931         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4932         gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4933         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4934         tcg_temp_free_i32(tcg_tmp32);
4935     }
4936 }
4937 
4938 /* REV with sf==1, opcode==3 ("REV64") */
4939 static void handle_rev64(DisasContext *s, unsigned int sf,
4940                          unsigned int rn, unsigned int rd)
4941 {
4942     if (!sf) {
4943         unallocated_encoding(s);
4944         return;
4945     }
4946     tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4947 }
4948 
4949 /* REV with sf==0, opcode==2
4950  * REV32 (sf==1, opcode==2)
4951  */
4952 static void handle_rev32(DisasContext *s, unsigned int sf,
4953                          unsigned int rn, unsigned int rd)
4954 {
4955     TCGv_i64 tcg_rd = cpu_reg(s, rd);
4956 
4957     if (sf) {
4958         TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4959         TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4960 
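        /*
         * Added note: each 32-bit half is byte-reversed on its own
         * and the halves are then re-joined in their original order.
         */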
4961         /* bswap32_i64 requires zero high word */
4962         tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4963         tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4964         tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4965         tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4966         tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4967 
4968         tcg_temp_free_i64(tcg_tmp);
4969     } else {
4970         tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4971         tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4972     }
4973 }
4974 
4975 /* REV16 (opcode==1) */
4976 static void handle_rev16(DisasContext *s, unsigned int sf,
4977                          unsigned int rn, unsigned int rd)
4978 {
4979     TCGv_i64 tcg_rd = cpu_reg(s, rd);
4980     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4981     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4982     TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4983 
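    /*
     * Worked example (added): for x = 0xAABBCCDD, (x & mask) << 8 is
     * 0xBB00DD00 and (x >> 8) & mask is 0x00AA00CC; OR-ing gives
     * 0xBBAADDCC, i.e. every 16-bit lane byte-swapped.
     */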
4984     tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4985     tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4986     tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4987     tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4988     tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4989 
4990     tcg_temp_free_i64(mask);
4991     tcg_temp_free_i64(tcg_tmp);
4992 }
4993 
4994 /* Data-processing (1 source)
4995  *   31  30  29  28             21 20     16 15    10 9    5 4    0
4996  * +----+---+---+-----------------+---------+--------+------+------+
4997  * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
4998  * +----+---+---+-----------------+---------+--------+------+------+
4999  */
5000 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
5001 {
5002     unsigned int sf, opcode, opcode2, rn, rd;
5003     TCGv_i64 tcg_rd;
5004 
5005     if (extract32(insn, 29, 1)) {
5006         unallocated_encoding(s);
5007         return;
5008     }
5009 
5010     sf = extract32(insn, 31, 1);
5011     opcode = extract32(insn, 10, 6);
5012     opcode2 = extract32(insn, 16, 5);
5013     rn = extract32(insn, 5, 5);
5014     rd = extract32(insn, 0, 5);
5015 
5016 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
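    /*
     * Added note: MAP packs sf into bit 0, opcode into bits 6..1 and
     * opcode2 into bits 11..7, e.g. MAP(1, 0x01, 0x08) == 0x91 for
     * PACIZA, so every encoding gets a distinct case label.
     */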
5017 
5018     switch (MAP(sf, opcode2, opcode)) {
5019     case MAP(0, 0x00, 0x00): /* RBIT */
5020     case MAP(1, 0x00, 0x00):
5021         handle_rbit(s, sf, rn, rd);
5022         break;
5023     case MAP(0, 0x00, 0x01): /* REV16 */
5024     case MAP(1, 0x00, 0x01):
5025         handle_rev16(s, sf, rn, rd);
5026         break;
5027     case MAP(0, 0x00, 0x02): /* REV/REV32 */
5028     case MAP(1, 0x00, 0x02):
5029         handle_rev32(s, sf, rn, rd);
5030         break;
5031     case MAP(1, 0x00, 0x03): /* REV64 */
5032         handle_rev64(s, sf, rn, rd);
5033         break;
5034     case MAP(0, 0x00, 0x04): /* CLZ */
5035     case MAP(1, 0x00, 0x04):
5036         handle_clz(s, sf, rn, rd);
5037         break;
5038     case MAP(0, 0x00, 0x05): /* CLS */
5039     case MAP(1, 0x00, 0x05):
5040         handle_cls(s, sf, rn, rd);
5041         break;
5042     case MAP(1, 0x01, 0x00): /* PACIA */
5043         if (s->pauth_active) {
5044             tcg_rd = cpu_reg(s, rd);
5045             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5046         } else if (!dc_isar_feature(aa64_pauth, s)) {
5047             goto do_unallocated;
5048         }
5049         break;
5050     case MAP(1, 0x01, 0x01): /* PACIB */
5051         if (s->pauth_active) {
5052             tcg_rd = cpu_reg(s, rd);
5053             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5054         } else if (!dc_isar_feature(aa64_pauth, s)) {
5055             goto do_unallocated;
5056         }
5057         break;
5058     case MAP(1, 0x01, 0x02): /* PACDA */
5059         if (s->pauth_active) {
5060             tcg_rd = cpu_reg(s, rd);
5061             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5062         } else if (!dc_isar_feature(aa64_pauth, s)) {
5063             goto do_unallocated;
5064         }
5065         break;
5066     case MAP(1, 0x01, 0x03): /* PACDB */
5067         if (s->pauth_active) {
5068             tcg_rd = cpu_reg(s, rd);
5069             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5070         } else if (!dc_isar_feature(aa64_pauth, s)) {
5071             goto do_unallocated;
5072         }
5073         break;
5074     case MAP(1, 0x01, 0x04): /* AUTIA */
5075         if (s->pauth_active) {
5076             tcg_rd = cpu_reg(s, rd);
5077             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5078         } else if (!dc_isar_feature(aa64_pauth, s)) {
5079             goto do_unallocated;
5080         }
5081         break;
5082     case MAP(1, 0x01, 0x05): /* AUTIB */
5083         if (s->pauth_active) {
5084             tcg_rd = cpu_reg(s, rd);
5085             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5086         } else if (!dc_isar_feature(aa64_pauth, s)) {
5087             goto do_unallocated;
5088         }
5089         break;
5090     case MAP(1, 0x01, 0x06): /* AUTDA */
5091         if (s->pauth_active) {
5092             tcg_rd = cpu_reg(s, rd);
5093             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5094         } else if (!dc_isar_feature(aa64_pauth, s)) {
5095             goto do_unallocated;
5096         }
5097         break;
5098     case MAP(1, 0x01, 0x07): /* AUTDB */
5099         if (s->pauth_active) {
5100             tcg_rd = cpu_reg(s, rd);
5101             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5102         } else if (!dc_isar_feature(aa64_pauth, s)) {
5103             goto do_unallocated;
5104         }
5105         break;
5106     case MAP(1, 0x01, 0x08): /* PACIZA */
5107         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5108             goto do_unallocated;
5109         } else if (s->pauth_active) {
5110             tcg_rd = cpu_reg(s, rd);
5111             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5112         }
5113         break;
5114     case MAP(1, 0x01, 0x09): /* PACIZB */
5115         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5116             goto do_unallocated;
5117         } else if (s->pauth_active) {
5118             tcg_rd = cpu_reg(s, rd);
5119             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5120         }
5121         break;
5122     case MAP(1, 0x01, 0x0a): /* PACDZA */
5123         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5124             goto do_unallocated;
5125         } else if (s->pauth_active) {
5126             tcg_rd = cpu_reg(s, rd);
5127             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5128         }
5129         break;
5130     case MAP(1, 0x01, 0x0b): /* PACDZB */
5131         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5132             goto do_unallocated;
5133         } else if (s->pauth_active) {
5134             tcg_rd = cpu_reg(s, rd);
5135             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5136         }
5137         break;
5138     case MAP(1, 0x01, 0x0c): /* AUTIZA */
5139         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5140             goto do_unallocated;
5141         } else if (s->pauth_active) {
5142             tcg_rd = cpu_reg(s, rd);
5143             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5144         }
5145         break;
5146     case MAP(1, 0x01, 0x0d): /* AUTIZB */
5147         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5148             goto do_unallocated;
5149         } else if (s->pauth_active) {
5150             tcg_rd = cpu_reg(s, rd);
5151             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5152         }
5153         break;
5154     case MAP(1, 0x01, 0x0e): /* AUTDZA */
5155         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5156             goto do_unallocated;
5157         } else if (s->pauth_active) {
5158             tcg_rd = cpu_reg(s, rd);
5159             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5160         }
5161         break;
5162     case MAP(1, 0x01, 0x0f): /* AUTDZB */
5163         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5164             goto do_unallocated;
5165         } else if (s->pauth_active) {
5166             tcg_rd = cpu_reg(s, rd);
5167             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5168         }
5169         break;
5170     case MAP(1, 0x01, 0x10): /* XPACI */
5171         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5172             goto do_unallocated;
5173         } else if (s->pauth_active) {
5174             tcg_rd = cpu_reg(s, rd);
5175             gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5176         }
5177         break;
5178     case MAP(1, 0x01, 0x11): /* XPACD */
5179         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5180             goto do_unallocated;
5181         } else if (s->pauth_active) {
5182             tcg_rd = cpu_reg(s, rd);
5183             gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5184         }
5185         break;
5186     default:
5187     do_unallocated:
5188         unallocated_encoding(s);
5189         break;
5190     }
5191 
5192 #undef MAP
5193 }
5194 
5195 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5196                        unsigned int rm, unsigned int rn, unsigned int rd)
5197 {
5198     TCGv_i64 tcg_n, tcg_m, tcg_rd;
5199     tcg_rd = cpu_reg(s, rd);
5200 
5201     if (!sf && is_signed) {
5202         tcg_n = new_tmp_a64(s);
5203         tcg_m = new_tmp_a64(s);
5204         tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5205         tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5206     } else {
5207         tcg_n = read_cpu_reg(s, rn, sf);
5208         tcg_m = read_cpu_reg(s, rm, sf);
5209     }
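    /*
     * Added note: the division helpers implement the architected
     * corner cases -- dividing by zero yields 0 and INT64_MIN / -1
     * returns INT64_MIN -- so no explicit checks are emitted here.
     */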
5210 
5211     if (is_signed) {
5212         gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5213     } else {
5214         gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5215     }
5216 
5217     if (!sf) { /* zero extend final result */
5218         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5219     }
5220 }
5221 
5222 /* LSLV, LSRV, ASRV, RORV */
5223 static void handle_shift_reg(DisasContext *s,
5224                              enum a64_shift_type shift_type, unsigned int sf,
5225                              unsigned int rm, unsigned int rn, unsigned int rd)
5226 {
5227     TCGv_i64 tcg_shift = tcg_temp_new_i64();
5228     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5229     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5230 
5231     tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
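    /* Added note: the shift amount is taken modulo the register
       width (mask 63 or 31), as architected for LSLV/LSRV/ASRV/RORV. */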
5232     shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5233     tcg_temp_free_i64(tcg_shift);
5234 }
5235 
5236 /* CRC32[BHWX], CRC32C[BHWX] */
5237 static void handle_crc32(DisasContext *s,
5238                          unsigned int sf, unsigned int sz, bool crc32c,
5239                          unsigned int rm, unsigned int rn, unsigned int rd)
5240 {
5241     TCGv_i64 tcg_acc, tcg_val;
5242     TCGv_i32 tcg_bytes;
5243 
5244     if (!dc_isar_feature(aa64_crc32, s)
5245         || (sf == 1 && sz != 3)
5246         || (sf == 0 && sz == 3)) {
5247         unallocated_encoding(s);
5248         return;
5249     }
5250 
5251     if (sz == 3) {
5252         tcg_val = cpu_reg(s, rm);
5253     } else {
5254         uint64_t mask;
5255         switch (sz) {
5256         case 0:
5257             mask = 0xFF;
5258             break;
5259         case 1:
5260             mask = 0xFFFF;
5261             break;
5262         case 2:
5263             mask = 0xFFFFFFFF;
5264             break;
5265         default:
5266             g_assert_not_reached();
5267         }
5268         tcg_val = new_tmp_a64(s);
5269         tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5270     }
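    /*
     * Added note: the helper consumes (1 << sz) bytes of the value,
     * so e.g. CRC32H masks Rm to 16 bits and passes tcg_bytes == 2.
     */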
5271 
5272     tcg_acc = cpu_reg(s, rn);
5273     tcg_bytes = tcg_const_i32(1 << sz);
5274 
5275     if (crc32c) {
5276         gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5277     } else {
5278         gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5279     }
5280 
5281     tcg_temp_free_i32(tcg_bytes);
5282 }
5283 
5284 /* Data-processing (2 source)
5285  *   31   30  29 28             21 20  16 15    10 9    5 4    0
5286  * +----+---+---+-----------------+------+--------+------+------+
5287  * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
5288  * +----+---+---+-----------------+------+--------+------+------+
5289  */
5290 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5291 {
5292     unsigned int sf, rm, opcode, rn, rd;
5293     sf = extract32(insn, 31, 1);
5294     rm = extract32(insn, 16, 5);
5295     opcode = extract32(insn, 10, 6);
5296     rn = extract32(insn, 5, 5);
5297     rd = extract32(insn, 0, 5);
5298 
5299     if (extract32(insn, 29, 1)) {
5300         unallocated_encoding(s);
5301         return;
5302     }
5303 
5304     switch (opcode) {
5305     case 2: /* UDIV */
5306         handle_div(s, false, sf, rm, rn, rd);
5307         break;
5308     case 3: /* SDIV */
5309         handle_div(s, true, sf, rm, rn, rd);
5310         break;
5311     case 8: /* LSLV */
5312         handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5313         break;
5314     case 9: /* LSRV */
5315         handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5316         break;
5317     case 10: /* ASRV */
5318         handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5319         break;
5320     case 11: /* RORV */
5321         handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5322         break;
5323     case 12: /* PACGA */
5324         if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5325             goto do_unallocated;
5326         }
5327         gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5328                          cpu_reg(s, rn), cpu_reg_sp(s, rm));
5329         break;
5330     case 16:
5331     case 17:
5332     case 18:
5333     case 19:
5334     case 20:
5335     case 21:
5336     case 22:
5337     case 23: /* CRC32 */
5338     {
5339         int sz = extract32(opcode, 0, 2);
5340         bool crc32c = extract32(opcode, 2, 1);
5341         handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5342         break;
5343     }
5344     default:
5345     do_unallocated:
5346         unallocated_encoding(s);
5347         break;
5348     }
5349 }
5350 
5351 /*
5352  * Data processing - register
5353  *  31  30 29  28      25    21  20  16      10         0
5354  * +--+---+--+---+-------+-----+-------+-------+---------+
5355  * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
5356  * +--+---+--+---+-------+-----+-------+-------+---------+
5357  */
5358 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5359 {
5360     int op0 = extract32(insn, 30, 1);
5361     int op1 = extract32(insn, 28, 1);
5362     int op2 = extract32(insn, 21, 4);
5363     int op3 = extract32(insn, 10, 6);
5364 
5365     if (!op1) {
5366         if (op2 & 8) {
5367             if (op2 & 1) {
5368                 /* Add/sub (extended register) */
5369                 disas_add_sub_ext_reg(s, insn);
5370             } else {
5371                 /* Add/sub (shifted register) */
5372                 disas_add_sub_reg(s, insn);
5373             }
5374         } else {
5375             /* Logical (shifted register) */
5376             disas_logic_reg(s, insn);
5377         }
5378         return;
5379     }
5380 
5381     switch (op2) {
5382     case 0x0:
5383         switch (op3) {
5384         case 0x00: /* Add/subtract (with carry) */
5385             disas_adc_sbc(s, insn);
5386             break;
5387 
5388         case 0x01: /* Rotate right into flags */
5389         case 0x21:
5390             disas_rotate_right_into_flags(s, insn);
5391             break;
5392 
5393         case 0x02: /* Evaluate into flags */
5394         case 0x12:
5395         case 0x22:
5396         case 0x32:
5397             disas_evaluate_into_flags(s, insn);
5398             break;
5399 
5400         default:
5401             goto do_unallocated;
5402         }
5403         break;
5404 
5405     case 0x2: /* Conditional compare */
5406         disas_cc(s, insn); /* both imm and reg forms */
5407         break;
5408 
5409     case 0x4: /* Conditional select */
5410         disas_cond_select(s, insn);
5411         break;
5412 
5413     case 0x6: /* Data-processing */
5414         if (op0) {    /* (1 source) */
5415             disas_data_proc_1src(s, insn);
5416         } else {      /* (2 source) */
5417             disas_data_proc_2src(s, insn);
5418         }
5419         break;
5420     case 0x8 ... 0xf: /* (3 source) */
5421         disas_data_proc_3src(s, insn);
5422         break;
5423 
5424     default:
5425     do_unallocated:
5426         unallocated_encoding(s);
5427         break;
5428     }
5429 }
5430 
5431 static void handle_fp_compare(DisasContext *s, int size,
5432                               unsigned int rn, unsigned int rm,
5433                               bool cmp_with_zero, bool signal_all_nans)
5434 {
5435     TCGv_i64 tcg_flags = tcg_temp_new_i64();
5436     TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
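    /*
     * Added note: the VFP compare helpers return the architected NZCV
     * in bits 31:28 of tcg_flags; gen_set_nzcv() at the end unpacks
     * that into the individual flag variables.
     */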
5437 
5438     if (size == MO_64) {
5439         TCGv_i64 tcg_vn, tcg_vm;
5440 
5441         tcg_vn = read_fp_dreg(s, rn);
5442         if (cmp_with_zero) {
5443             tcg_vm = tcg_const_i64(0);
5444         } else {
5445             tcg_vm = read_fp_dreg(s, rm);
5446         }
5447         if (signal_all_nans) {
5448             gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5449         } else {
5450             gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5451         }
5452         tcg_temp_free_i64(tcg_vn);
5453         tcg_temp_free_i64(tcg_vm);
5454     } else {
5455         TCGv_i32 tcg_vn = tcg_temp_new_i32();
5456         TCGv_i32 tcg_vm = tcg_temp_new_i32();
5457 
5458         read_vec_element_i32(s, tcg_vn, rn, 0, size);
5459         if (cmp_with_zero) {
5460             tcg_gen_movi_i32(tcg_vm, 0);
5461         } else {
5462             read_vec_element_i32(s, tcg_vm, rm, 0, size);
5463         }
5464 
5465         switch (size) {
5466         case MO_32:
5467             if (signal_all_nans) {
5468                 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5469             } else {
5470                 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5471             }
5472             break;
5473         case MO_16:
5474             if (signal_all_nans) {
5475                 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5476             } else {
5477                 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5478             }
5479             break;
5480         default:
5481             g_assert_not_reached();
5482         }
5483 
5484         tcg_temp_free_i32(tcg_vn);
5485         tcg_temp_free_i32(tcg_vm);
5486     }
5487 
5488     tcg_temp_free_ptr(fpst);
5489 
5490     gen_set_nzcv(tcg_flags);
5491 
5492     tcg_temp_free_i64(tcg_flags);
5493 }
5494 
5495 /* Floating point compare
5496  *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
5497  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5498  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
5499  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5500  */
5501 static void disas_fp_compare(DisasContext *s, uint32_t insn)
5502 {
5503     unsigned int mos, type, rm, op, rn, opc, op2r;
5504     int size;
5505 
5506     mos = extract32(insn, 29, 3);
5507     type = extract32(insn, 22, 2);
5508     rm = extract32(insn, 16, 5);
5509     op = extract32(insn, 14, 2);
5510     rn = extract32(insn, 5, 5);
5511     opc = extract32(insn, 3, 2);
5512     op2r = extract32(insn, 0, 3);
5513 
5514     if (mos || op || op2r) {
5515         unallocated_encoding(s);
5516         return;
5517     }
5518 
5519     switch (type) {
5520     case 0:
5521         size = MO_32;
5522         break;
5523     case 1:
5524         size = MO_64;
5525         break;
5526     case 3:
5527         size = MO_16;
5528         if (dc_isar_feature(aa64_fp16, s)) {
5529             break;
5530         }
5531         /* fallthru */
5532     default:
5533         unallocated_encoding(s);
5534         return;
5535     }
5536 
5537     if (!fp_access_check(s)) {
5538         return;
5539     }
5540 
5541     handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
5542 }
5543 
5544 /* Floating point conditional compare
5545  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
5546  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5547  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
5548  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5549  */
5550 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
5551 {
5552     unsigned int mos, type, rm, cond, rn, op, nzcv;
5553     TCGv_i64 tcg_flags;
5554     TCGLabel *label_continue = NULL;
5555     int size;
5556 
5557     mos = extract32(insn, 29, 3);
5558     type = extract32(insn, 22, 2);
5559     rm = extract32(insn, 16, 5);
5560     cond = extract32(insn, 12, 4);
5561     rn = extract32(insn, 5, 5);
5562     op = extract32(insn, 4, 1);
5563     nzcv = extract32(insn, 0, 4);
5564 
5565     if (mos) {
5566         unallocated_encoding(s);
5567         return;
5568     }
5569 
5570     switch (type) {
5571     case 0:
5572         size = MO_32;
5573         break;
5574     case 1:
5575         size = MO_64;
5576         break;
5577     case 3:
5578         size = MO_16;
5579         if (dc_isar_feature(aa64_fp16, s)) {
5580             break;
5581         }
5582         /* fallthru */
5583     default:
5584         unallocated_encoding(s);
5585         return;
5586     }
5587 
5588     if (!fp_access_check(s)) {
5589         return;
5590     }
5591 
5592     if (cond < 0x0e) { /* not always */
5593         TCGLabel *label_match = gen_new_label();
5594         label_continue = gen_new_label();
5595         arm_gen_test_cc(cond, label_match);
5596         /* nomatch: */
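        /* Added note: the immediate #nzcv is shifted into bits 31:28,
           the layout gen_set_nzcv() expects. */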
5597         tcg_flags = tcg_const_i64(nzcv << 28);
5598         gen_set_nzcv(tcg_flags);
5599         tcg_temp_free_i64(tcg_flags);
5600         tcg_gen_br(label_continue);
5601         gen_set_label(label_match);
5602     }
5603 
5604     handle_fp_compare(s, size, rn, rm, false, op);
5605 
5606     if (cond < 0x0e) {
5607         gen_set_label(label_continue);
5608     }
5609 }
5610 
5611 /* Floating point conditional select
5612  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
5613  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5614  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
5615  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5616  */
5617 static void disas_fp_csel(DisasContext *s, uint32_t insn)
5618 {
5619     unsigned int mos, type, rm, cond, rn, rd;
5620     TCGv_i64 t_true, t_false, t_zero;
5621     DisasCompare64 c;
5622     MemOp sz;
5623 
5624     mos = extract32(insn, 29, 3);
5625     type = extract32(insn, 22, 2);
5626     rm = extract32(insn, 16, 5);
5627     cond = extract32(insn, 12, 4);
5628     rn = extract32(insn, 5, 5);
5629     rd = extract32(insn, 0, 5);
5630 
5631     if (mos) {
5632         unallocated_encoding(s);
5633         return;
5634     }
5635 
5636     switch (type) {
5637     case 0:
5638         sz = MO_32;
5639         break;
5640     case 1:
5641         sz = MO_64;
5642         break;
5643     case 3:
5644         sz = MO_16;
5645         if (dc_isar_feature(aa64_fp16, s)) {
5646             break;
5647         }
5648         /* fallthru */
5649     default:
5650         unallocated_encoding(s);
5651         return;
5652     }
5653 
5654     if (!fp_access_check(s)) {
5655         return;
5656     }
5657 
5658     /* Zero extend sreg & hreg inputs to 64 bits now.  */
5659     t_true = tcg_temp_new_i64();
5660     t_false = tcg_temp_new_i64();
5661     read_vec_element(s, t_true, rn, 0, sz);
5662     read_vec_element(s, t_false, rm, 0, sz);
5663 
5664     a64_test_cc(&c, cond);
5665     t_zero = tcg_const_i64(0);
5666     tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
5667     tcg_temp_free_i64(t_zero);
5668     tcg_temp_free_i64(t_false);
5669     a64_free_cc(&c);
5670 
5671     /* Note that sregs & hregs write back zeros to the high bits,
5672        and we've already done the zero-extension.  */
5673     write_fp_dreg(s, rd, t_true);
5674     tcg_temp_free_i64(t_true);
5675 }
5676 
5677 /* Floating-point data-processing (1 source) - half precision */
5678 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
5679 {
5680     TCGv_ptr fpst = NULL;
5681     TCGv_i32 tcg_op = read_fp_hreg(s, rn);
5682     TCGv_i32 tcg_res = tcg_temp_new_i32();
5683 
5684     switch (opcode) {
5685     case 0x0: /* FMOV */
5686         tcg_gen_mov_i32(tcg_res, tcg_op);
5687         break;
5688     case 0x1: /* FABS */
5689         tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
5690         break;
5691     case 0x2: /* FNEG */
5692         tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
5693         break;
5694     case 0x3: /* FSQRT */
5695         fpst = get_fpstatus_ptr(true);
5696         gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
5697         break;
5698     case 0x8: /* FRINTN */
5699     case 0x9: /* FRINTP */
5700     case 0xa: /* FRINTM */
5701     case 0xb: /* FRINTZ */
5702     case 0xc: /* FRINTA */
5703     {
5704         TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
5705         fpst = get_fpstatus_ptr(true);
5706 
5707         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5708         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5709 
5710         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5711         tcg_temp_free_i32(tcg_rmode);
5712         break;
5713     }
5714     case 0xe: /* FRINTX */
5715         fpst = get_fpstatus_ptr(true);
5716         gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
5717         break;
5718     case 0xf: /* FRINTI */
5719         fpst = get_fpstatus_ptr(true);
5720         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5721         break;
5722     default:
5723         abort();
5724     }
5725 
5726     write_fp_sreg(s, rd, tcg_res);
5727 
5728     if (fpst) {
5729         tcg_temp_free_ptr(fpst);
5730     }
5731     tcg_temp_free_i32(tcg_op);
5732     tcg_temp_free_i32(tcg_res);
5733 }
5734 
5735 /* Floating-point data-processing (1 source) - single precision */
5736 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5737 {
5738     void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
5739     TCGv_i32 tcg_op, tcg_res;
5740     TCGv_ptr fpst;
5741     int rmode = -1;
5742 
5743     tcg_op = read_fp_sreg(s, rn);
5744     tcg_res = tcg_temp_new_i32();
5745 
5746     switch (opcode) {
5747     case 0x0: /* FMOV */
5748         tcg_gen_mov_i32(tcg_res, tcg_op);
5749         goto done;
5750     case 0x1: /* FABS */
5751         gen_helper_vfp_abss(tcg_res, tcg_op);
5752         goto done;
5753     case 0x2: /* FNEG */
5754         gen_helper_vfp_negs(tcg_res, tcg_op);
5755         goto done;
5756     case 0x3: /* FSQRT */
5757         gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
5758         goto done;
5759     case 0x8: /* FRINTN */
5760     case 0x9: /* FRINTP */
5761     case 0xa: /* FRINTM */
5762     case 0xb: /* FRINTZ */
5763     case 0xc: /* FRINTA */
5764         rmode = arm_rmode_to_sf(opcode & 7);
5765         gen_fpst = gen_helper_rints;
5766         break;
5767     case 0xe: /* FRINTX */
5768         gen_fpst = gen_helper_rints_exact;
5769         break;
5770     case 0xf: /* FRINTI */
5771         gen_fpst = gen_helper_rints;
5772         break;
5773     case 0x10: /* FRINT32Z */
5774         rmode = float_round_to_zero;
5775         gen_fpst = gen_helper_frint32_s;
5776         break;
5777     case 0x11: /* FRINT32X */
5778         gen_fpst = gen_helper_frint32_s;
5779         break;
5780     case 0x12: /* FRINT64Z */
5781         rmode = float_round_to_zero;
5782         gen_fpst = gen_helper_frint64_s;
5783         break;
5784     case 0x13: /* FRINT64X */
5785         gen_fpst = gen_helper_frint64_s;
5786         break;
5787     default:
5788         g_assert_not_reached();
5789     }
5790 
5791     fpst = get_fpstatus_ptr(false);
5792     if (rmode >= 0) {
5793         TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
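        /* Added note: set_rmode installs the new rounding mode and
           returns the previous one in tcg_rmode, so the second call
           below restores the original mode. */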
5794         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5795         gen_fpst(tcg_res, tcg_op, fpst);
5796         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5797         tcg_temp_free_i32(tcg_rmode);
5798     } else {
5799         gen_fpst(tcg_res, tcg_op, fpst);
5800     }
5801     tcg_temp_free_ptr(fpst);
5802 
5803  done:
5804     write_fp_sreg(s, rd, tcg_res);
5805     tcg_temp_free_i32(tcg_op);
5806     tcg_temp_free_i32(tcg_res);
5807 }
5808 
5809 /* Floating-point data-processing (1 source) - double precision */
5810 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
5811 {
5812     void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
5813     TCGv_i64 tcg_op, tcg_res;
5814     TCGv_ptr fpst;
5815     int rmode = -1;
5816 
5817     switch (opcode) {
5818     case 0x0: /* FMOV */
5819         gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
5820         return;
5821     }
5822 
5823     tcg_op = read_fp_dreg(s, rn);
5824     tcg_res = tcg_temp_new_i64();
5825 
5826     switch (opcode) {
5827     case 0x1: /* FABS */
5828         gen_helper_vfp_absd(tcg_res, tcg_op);
5829         goto done;
5830     case 0x2: /* FNEG */
5831         gen_helper_vfp_negd(tcg_res, tcg_op);
5832         goto done;
5833     case 0x3: /* FSQRT */
5834         gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
5835         goto done;
5836     case 0x8: /* FRINTN */
5837     case 0x9: /* FRINTP */
5838     case 0xa: /* FRINTM */
5839     case 0xb: /* FRINTZ */
5840     case 0xc: /* FRINTA */
5841         rmode = arm_rmode_to_sf(opcode & 7);
5842         gen_fpst = gen_helper_rintd;
5843         break;
5844     case 0xe: /* FRINTX */
5845         gen_fpst = gen_helper_rintd_exact;
5846         break;
5847     case 0xf: /* FRINTI */
5848         gen_fpst = gen_helper_rintd;
5849         break;
5850     case 0x10: /* FRINT32Z */
5851         rmode = float_round_to_zero;
5852         gen_fpst = gen_helper_frint32_d;
5853         break;
5854     case 0x11: /* FRINT32X */
5855         gen_fpst = gen_helper_frint32_d;
5856         break;
5857     case 0x12: /* FRINT64Z */
5858         rmode = float_round_to_zero;
5859         gen_fpst = gen_helper_frint64_d;
5860         break;
5861     case 0x13: /* FRINT64X */
5862         gen_fpst = gen_helper_frint64_d;
5863         break;
5864     default:
5865         g_assert_not_reached();
5866     }
5867 
5868     fpst = get_fpstatus_ptr(false);
5869     if (rmode >= 0) {
5870         TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
5871         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5872         gen_fpst(tcg_res, tcg_op, fpst);
5873         gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5874         tcg_temp_free_i32(tcg_rmode);
5875     } else {
5876         gen_fpst(tcg_res, tcg_op, fpst);
5877     }
5878     tcg_temp_free_ptr(fpst);
5879 
5880  done:
5881     write_fp_dreg(s, rd, tcg_res);
5882     tcg_temp_free_i64(tcg_op);
5883     tcg_temp_free_i64(tcg_res);
5884 }
5885 
5886 static void handle_fp_fcvt(DisasContext *s, int opcode,
5887                            int rd, int rn, int dtype, int ntype)
5888 {
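    /*
     * Added note: ntype is the source precision (0 = single,
     * 1 = double, 3 = half) and dtype the destination; the AHP flag
     * read on the half-precision paths selects between IEEE and the
     * alternative half-precision format per FPCR.AHP.
     */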
5889     switch (ntype) {
5890     case 0x0:
5891     {
5892         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5893         if (dtype == 1) {
5894             /* Single to double */
5895             TCGv_i64 tcg_rd = tcg_temp_new_i64();
5896             gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
5897             write_fp_dreg(s, rd, tcg_rd);
5898             tcg_temp_free_i64(tcg_rd);
5899         } else {
5900             /* Single to half */
5901             TCGv_i32 tcg_rd = tcg_temp_new_i32();
5902             TCGv_i32 ahp = get_ahp_flag();
5903             TCGv_ptr fpst = get_fpstatus_ptr(false);
5904 
5905             gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5906             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5907             write_fp_sreg(s, rd, tcg_rd);
5908             tcg_temp_free_i32(tcg_rd);
5909             tcg_temp_free_i32(ahp);
5910             tcg_temp_free_ptr(fpst);
5911         }
5912         tcg_temp_free_i32(tcg_rn);
5913         break;
5914     }
5915     case 0x1:
5916     {
5917         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
5918         TCGv_i32 tcg_rd = tcg_temp_new_i32();
5919         if (dtype == 0) {
5920             /* Double to single */
5921             gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
5922         } else {
5923             TCGv_ptr fpst = get_fpstatus_ptr(false);
5924             TCGv_i32 ahp = get_ahp_flag();
5925             /* Double to half */
5926             gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5927             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5928             tcg_temp_free_ptr(fpst);
5929             tcg_temp_free_i32(ahp);
5930         }
5931         write_fp_sreg(s, rd, tcg_rd);
5932         tcg_temp_free_i32(tcg_rd);
5933         tcg_temp_free_i64(tcg_rn);
5934         break;
5935     }
5936     case 0x3:
5937     {
5938         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5939         TCGv_ptr tcg_fpst = get_fpstatus_ptr(false);
5940         TCGv_i32 tcg_ahp = get_ahp_flag();
5941         tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
5942         if (dtype == 0) {
5943             /* Half to single */
5944             TCGv_i32 tcg_rd = tcg_temp_new_i32();
5945             gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5946             write_fp_sreg(s, rd, tcg_rd);
5947             tcg_temp_free_i32(tcg_rd);
5948         } else {
5949             /* Half to double */
5950             TCGv_i64 tcg_rd = tcg_temp_new_i64();
5951             gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5952             write_fp_dreg(s, rd, tcg_rd);
5953             tcg_temp_free_i64(tcg_rd);
5954         }
5955         tcg_temp_free_i32(tcg_rn);
5956         tcg_temp_free_ptr(tcg_fpst);
5957         tcg_temp_free_i32(tcg_ahp);
5958         break;
5959     }
5960     default:
5961         abort();
5962     }
5963 }
5964 
5965 /* Floating point data-processing (1 source)
5966  *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
5967  * +---+---+---+-----------+------+---+--------+-----------+------+------+
5968  * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
5969  * +---+---+---+-----------+------+---+--------+-----------+------+------+
5970  */
5971 static void disas_fp_1src(DisasContext *s, uint32_t insn)
5972 {
5973     int mos = extract32(insn, 29, 3);
5974     int type = extract32(insn, 22, 2);
5975     int opcode = extract32(insn, 15, 6);
5976     int rn = extract32(insn, 5, 5);
5977     int rd = extract32(insn, 0, 5);
5978 
5979     if (mos) {
5980         unallocated_encoding(s);
5981         return;
5982     }
5983 
5984     switch (opcode) {
5985     case 0x4: case 0x5: case 0x7:
5986     {
5987         /* FCVT between half, single and double precision */
5988         int dtype = extract32(opcode, 0, 2);
5989         if (type == 2 || dtype == type) {
5990             unallocated_encoding(s);
5991             return;
5992         }
5993         if (!fp_access_check(s)) {
5994             return;
5995         }
5996 
5997         handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
5998         break;
5999     }
6000 
6001     case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6002         if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
6003             unallocated_encoding(s);
6004             return;
6005         }
6006         /* fall through */
6007     case 0x0 ... 0x3:
6008     case 0x8 ... 0xc:
6009     case 0xe ... 0xf:
6010         /* 32-to-32 and 64-to-64 ops */
6011         switch (type) {
6012         case 0:
6013             if (!fp_access_check(s)) {
6014                 return;
6015             }
6016             handle_fp_1src_single(s, opcode, rd, rn);
6017             break;
6018         case 1:
6019             if (!fp_access_check(s)) {
6020                 return;
6021             }
6022             handle_fp_1src_double(s, opcode, rd, rn);
6023             break;
6024         case 3:
6025             if (!dc_isar_feature(aa64_fp16, s)) {
6026                 unallocated_encoding(s);
6027                 return;
6028             }
6029 
6030             if (!fp_access_check(s)) {
6031                 return;
6032             }
6033             handle_fp_1src_half(s, opcode, rd, rn);
6034             break;
6035         default:
6036             unallocated_encoding(s);
6037         }
6038         break;
6039 
6040     default:
6041         unallocated_encoding(s);
6042         break;
6043     }
6044 }
6045 
6046 /* Floating-point data-processing (2 source) - single precision */
6047 static void handle_fp_2src_single(DisasContext *s, int opcode,
6048                                   int rd, int rn, int rm)
6049 {
6050     TCGv_i32 tcg_op1;
6051     TCGv_i32 tcg_op2;
6052     TCGv_i32 tcg_res;
6053     TCGv_ptr fpst;
6054 
6055     tcg_res = tcg_temp_new_i32();
6056     fpst = get_fpstatus_ptr(false);
6057     tcg_op1 = read_fp_sreg(s, rn);
6058     tcg_op2 = read_fp_sreg(s, rm);
6059 
6060     switch (opcode) {
6061     case 0x0: /* FMUL */
6062         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6063         break;
6064     case 0x1: /* FDIV */
6065         gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6066         break;
6067     case 0x2: /* FADD */
6068         gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6069         break;
6070     case 0x3: /* FSUB */
6071         gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6072         break;
6073     case 0x4: /* FMAX */
6074         gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6075         break;
6076     case 0x5: /* FMIN */
6077         gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6078         break;
6079     case 0x6: /* FMAXNM */
6080         gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6081         break;
6082     case 0x7: /* FMINNM */
6083         gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6084         break;
6085     case 0x8: /* FNMUL */
6086         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6087         gen_helper_vfp_negs(tcg_res, tcg_res);
6088         break;
6089     }
6090 
6091     write_fp_sreg(s, rd, tcg_res);
6092 
6093     tcg_temp_free_ptr(fpst);
6094     tcg_temp_free_i32(tcg_op1);
6095     tcg_temp_free_i32(tcg_op2);
6096     tcg_temp_free_i32(tcg_res);
6097 }
6098 
6099 /* Floating-point data-processing (2 source) - double precision */
6100 static void handle_fp_2src_double(DisasContext *s, int opcode,
6101                                   int rd, int rn, int rm)
6102 {
6103     TCGv_i64 tcg_op1;
6104     TCGv_i64 tcg_op2;
6105     TCGv_i64 tcg_res;
6106     TCGv_ptr fpst;
6107 
6108     tcg_res = tcg_temp_new_i64();
6109     fpst = get_fpstatus_ptr(false);
6110     tcg_op1 = read_fp_dreg(s, rn);
6111     tcg_op2 = read_fp_dreg(s, rm);
6112 
6113     switch (opcode) {
6114     case 0x0: /* FMUL */
6115         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6116         break;
6117     case 0x1: /* FDIV */
6118         gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6119         break;
6120     case 0x2: /* FADD */
6121         gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6122         break;
6123     case 0x3: /* FSUB */
6124         gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6125         break;
6126     case 0x4: /* FMAX */
6127         gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6128         break;
6129     case 0x5: /* FMIN */
6130         gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6131         break;
6132     case 0x6: /* FMAXNM */
6133         gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6134         break;
6135     case 0x7: /* FMINNM */
6136         gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6137         break;
6138     case 0x8: /* FNMUL */
6139         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6140         gen_helper_vfp_negd(tcg_res, tcg_res);
6141         break;
6142     }
6143 
6144     write_fp_dreg(s, rd, tcg_res);
6145 
6146     tcg_temp_free_ptr(fpst);
6147     tcg_temp_free_i64(tcg_op1);
6148     tcg_temp_free_i64(tcg_op2);
6149     tcg_temp_free_i64(tcg_res);
6150 }
6151 
6152 /* Floating-point data-processing (2 source) - half precision */
6153 static void handle_fp_2src_half(DisasContext *s, int opcode,
6154                                 int rd, int rn, int rm)
6155 {
6156     TCGv_i32 tcg_op1;
6157     TCGv_i32 tcg_op2;
6158     TCGv_i32 tcg_res;
6159     TCGv_ptr fpst;
6160 
6161     tcg_res = tcg_temp_new_i32();
6162     fpst = get_fpstatus_ptr(true);
6163     tcg_op1 = read_fp_hreg(s, rn);
6164     tcg_op2 = read_fp_hreg(s, rm);
6165 
6166     switch (opcode) {
6167     case 0x0: /* FMUL */
6168         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6169         break;
6170     case 0x1: /* FDIV */
6171         gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6172         break;
6173     case 0x2: /* FADD */
6174         gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6175         break;
6176     case 0x3: /* FSUB */
6177         gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6178         break;
6179     case 0x4: /* FMAX */
6180         gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6181         break;
6182     case 0x5: /* FMIN */
6183         gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6184         break;
6185     case 0x6: /* FMAXNM */
6186         gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6187         break;
6188     case 0x7: /* FMINNM */
6189         gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6190         break;
6191     case 0x8: /* FNMUL */
6192         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
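        /* 0x8000 is the sign bit of an IEEE 754 half-precision value,
         * so the XOR below implements FNEG on the fp16 product.
         */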
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}

/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}

/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}

/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input. The XORs below flip the
     * fp16 sign bit directly.
     */
    if (o1) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}

/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    TCGv_i64 tcg_res;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

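    /* The imm8 field abcdefgh expands as in the Arm ARM VFPExpandImm():
     * bit 7 is the sign, bit 6 (inverted, then replicated) forms the
     * exponent, and bits 5:0 become the top fraction bits.
     */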
    imm = vfp_expand_imm(sz, imm8);

    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}

/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64).
 * OPTME: consider special-casing that, or at least skipping the call to
 * scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = get_fpstatus_ptr(type == 3);

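    /* The scale field encodes 64 - fbits, so tcg_shift below holds the
     * number of fractional bits; pure integer conversions pass
     * scale == 64 and hence a zero shift.
     */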
    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

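        /* gen_helper_set_rmode() returns the previous rounding mode in
         * tcg_rmode, so the second call after the switch restores the
         * original mode.
         */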
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}

/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

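    /* For a 32-bit conversion (sf == 0), fbits is 64 - scale and must
     * not exceed 32, so scale values below 32 are unallocated.
     */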
    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */

    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = get_fpstatus_ptr(false);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);

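    /* FJCVTZS sets NZCV to 0Z00. The helper returns the Z information
     * in the top 32 bits of t; since QEMU encodes Z as "set when
     * cpu_ZF is zero", the high half is moved into cpu_ZF directly,
     * while N, C and V are cleared.
     */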
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}

/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}

/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
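            /* Note that ctz32(0) == 32, so insn bits [15:12] == 0000
             * falls through to the default (FP <-> integer) arm below.
             */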
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}

static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

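    /* For example, with pos == 24 the result is
     * (right >> 24) | (left << 40).
     */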
    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}

/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

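        /* For example, pos == 72 starts at Vn element 1, giving
         * resl = Vn[1] >> 8 | Vm[0] << 56 and
         * resh = Vm[0] >> 8 | Vm[1] << 56.
         */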
        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, true, rd);
}

/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* This does a table lookup: for every byte element in the input
     * we index into a table formed from up to four vector registers,
     * and then the output is the result of the lookups. Our helper
     * function does the lookup operation for a single 64 bit part of
     * the input.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

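    /* TBX preloads the destination so that out-of-range indices leave
     * the existing bytes unchanged; for TBL they produce zeroes.
     */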
    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, true, rd);
}

/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14  12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, true, rd);
}

/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function; the TCG temps should be freed by
 * the calling function once it is done with the values.
 */
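/*
 * For example, an 8-element fminv starts with vmap == 0xff; each level
 * splits the map in half (0xf0 and 0x0f, then 0xc0/0x30/0x0c/0x03)
 * until a single element remains.
 */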
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}

/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     *  + for [US]ADDLV the maximum element size is 32 bits, and
     *    the result type is 64 bits
     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *    same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
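        /* fpopcode packs U and the min/max selector on top of the
         * opcode so the case labels in do_reduction_op() distinguish
         * the half-precision (0x0c..0x1f) and single-precision
         * (0x2c..0x3f) variants.
         */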
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}

/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
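/*
 * For example, imm5 == 0b10100 gives size 2 (32-bit elements) and
 * index 0b10, i.e. element 2 of Vn.
 */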
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index = imm5 >> (size + 1);

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}

/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}

/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}

/* INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}


/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}

/*
 * UMOV (General)
 * SMOV (General)
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
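    /* SMOV must widen, so 64-bit elements are excluded and a 32-bit
     * element requires Xd; UMOV requires Wd for elements smaller than
     * 64 bits and Xd for a 64-bit element.
     */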
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}

/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
7755                 imm = (abcdefgh & 0x3f) << 48;
7756                 if (abcdefgh & 0x80) {
7757                     imm |= 0x8000000000000000ULL;
7758                 }
7759                 if (abcdefgh & 0x40) {
7760                     imm |= 0x3fc0000000000000ULL;
7761                 } else {
7762                     imm |= 0x4000000000000000ULL;
7763                 }
7764             } else {
7765                 if (o2) {
7766                     /* FMOV (vector, immediate) - half-precision */
7767                     imm = vfp_expand_imm(MO_16, abcdefgh);
7768                     /* now duplicate across the lanes */
7769                     imm = bitfield_replicate(imm, 16);
7770                 } else {
7771                     imm = (abcdefgh & 0x3f) << 19;
7772                     if (abcdefgh & 0x80) {
7773                         imm |= 0x80000000;
7774                     }
7775                     if (abcdefgh & 0x40) {
7776                         imm |= 0x3e000000;
7777                     } else {
7778                         imm |= 0x40000000;
7779                     }
7780                     imm |= (imm << 32);
7781                 }
7782             }
7783         }
7784         break;
7785     default:
7786         fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
7787         g_assert_not_reached();
7788     }
7789 
7790     if (cmode_3_1 != 7 && is_neg) {
7791         imm = ~imm;
7792     }
7793 
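    /*
     * cmode 0xx1 (32-bit shifted) and 10x1 (16-bit shifted) encode
     * ORR/BIC; everything else is MOVI/MVNI.
     */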
7794     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
7795         /* MOVI or MVNI, with MVNI negation handled above.  */
7796         tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7797                             vec_full_reg_size(s), imm);
7798     } else {
7799         /* ORR or BIC, with BIC negation to AND handled above.  */
7800         if (is_neg) {
7801             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7802         } else {
7803             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7804         }
7805     }
7806 }
7807 
7808 /* AdvSIMD scalar copy
7809  *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
7810  * +-----+----+-----------------+------+---+------+---+------+------+
7811  * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7812  * +-----+----+-----------------+------+---+------+---+------+------+
7813  */
7814 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7815 {
7816     int rd = extract32(insn, 0, 5);
7817     int rn = extract32(insn, 5, 5);
7818     int imm4 = extract32(insn, 11, 4);
7819     int imm5 = extract32(insn, 16, 5);
7820     int op = extract32(insn, 29, 1);
7821 
7822     if (op != 0 || imm4 != 0) {
7823         unallocated_encoding(s);
7824         return;
7825     }
7826 
7827     /* DUP (element, scalar) */
7828     handle_simd_dupes(s, rd, rn, imm5);
7829 }
7830 
7831 /* AdvSIMD scalar pairwise
7832  *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7833  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7834  * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7835  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7836  */
7837 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7838 {
7839     int u = extract32(insn, 29, 1);
7840     int size = extract32(insn, 22, 2);
7841     int opcode = extract32(insn, 12, 5);
7842     int rn = extract32(insn, 5, 5);
7843     int rd = extract32(insn, 0, 5);
7844     TCGv_ptr fpst;
7845 
7846     /* For some ops (the FP ones), size[1] is part of the encoding.
7847      * For ADDP strictly it is not but size[1] is always 1 for valid
7848      * encodings.
7849      */
7850     opcode |= (extract32(size, 1, 1) << 5);
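    /*
     * e.g. ADDP encodes opcode == 0x1b and requires size == 3, so it
     * becomes case 0x3b below; the FP min ops (FMINNMP/FMINP) have
     * size[1] == 1 and so gain 0x20 relative to the max ops.
     */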
7851 
7852     switch (opcode) {
7853     case 0x3b: /* ADDP */
7854         if (u || size != 3) {
7855             unallocated_encoding(s);
7856             return;
7857         }
7858         if (!fp_access_check(s)) {
7859             return;
7860         }
7861 
7862         fpst = NULL;
7863         break;
7864     case 0xc: /* FMAXNMP */
7865     case 0xd: /* FADDP */
7866     case 0xf: /* FMAXP */
7867     case 0x2c: /* FMINNMP */
7868     case 0x2f: /* FMINP */
7869         /* FP op, size[0] is 32 or 64 bit */
7870         if (!u) {
7871             if (!dc_isar_feature(aa64_fp16, s)) {
7872                 unallocated_encoding(s);
7873                 return;
7874             } else {
7875                 size = MO_16;
7876             }
7877         } else {
7878             size = extract32(size, 0, 1) ? MO_64 : MO_32;
7879         }
7880 
7881         if (!fp_access_check(s)) {
7882             return;
7883         }
7884 
7885         fpst = get_fpstatus_ptr(size == MO_16);
7886         break;
7887     default:
7888         unallocated_encoding(s);
7889         return;
7890     }
7891 
7892     if (size == MO_64) {
7893         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7894         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7895         TCGv_i64 tcg_res = tcg_temp_new_i64();
7896 
7897         read_vec_element(s, tcg_op1, rn, 0, MO_64);
7898         read_vec_element(s, tcg_op2, rn, 1, MO_64);
7899 
7900         switch (opcode) {
7901         case 0x3b: /* ADDP */
7902             tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7903             break;
7904         case 0xc: /* FMAXNMP */
7905             gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7906             break;
7907         case 0xd: /* FADDP */
7908             gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7909             break;
7910         case 0xf: /* FMAXP */
7911             gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7912             break;
7913         case 0x2c: /* FMINNMP */
7914             gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7915             break;
7916         case 0x2f: /* FMINP */
7917             gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7918             break;
7919         default:
7920             g_assert_not_reached();
7921         }
7922 
7923         write_fp_dreg(s, rd, tcg_res);
7924 
7925         tcg_temp_free_i64(tcg_op1);
7926         tcg_temp_free_i64(tcg_op2);
7927         tcg_temp_free_i64(tcg_res);
7928     } else {
7929         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7930         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7931         TCGv_i32 tcg_res = tcg_temp_new_i32();
7932 
7933         read_vec_element_i32(s, tcg_op1, rn, 0, size);
7934         read_vec_element_i32(s, tcg_op2, rn, 1, size);
7935 
7936         if (size == MO_16) {
7937             switch (opcode) {
7938             case 0xc: /* FMAXNMP */
7939                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7940                 break;
7941             case 0xd: /* FADDP */
7942                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
7943                 break;
7944             case 0xf: /* FMAXP */
7945                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
7946                 break;
7947             case 0x2c: /* FMINNMP */
7948                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7949                 break;
7950             case 0x2f: /* FMINP */
7951                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
7952                 break;
7953             default:
7954                 g_assert_not_reached();
7955             }
7956         } else {
7957             switch (opcode) {
7958             case 0xc: /* FMAXNMP */
7959                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7960                 break;
7961             case 0xd: /* FADDP */
7962                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7963                 break;
7964             case 0xf: /* FMAXP */
7965                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7966                 break;
7967             case 0x2c: /* FMINNMP */
7968                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7969                 break;
7970             case 0x2f: /* FMINP */
7971                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7972                 break;
7973             default:
7974                 g_assert_not_reached();
7975             }
7976         }
7977 
7978         write_fp_sreg(s, rd, tcg_res);
7979 
7980         tcg_temp_free_i32(tcg_op1);
7981         tcg_temp_free_i32(tcg_op2);
7982         tcg_temp_free_i32(tcg_res);
7983     }
7984 
7985     if (fpst) {
7986         tcg_temp_free_ptr(fpst);
7987     }
7988 }
7989 
7990 /*
7991  * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
7992  *
7993  * This code handles the common shift logic and is used by both
7994  * the vector and scalar code.
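 *
 * Rounding adds 1 << (shift - 1) before shifting: e.g. SRSHR #2 maps
 * 7 to (7 + 2) >> 2 == 2 rather than 1. For size == 3 that addition
 * can carry out of 64 bits, which is why the rounding path extends
 * the intermediate result to a 128-bit (lo, hi) pair.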
7995  */
7996 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
7997                                     TCGv_i64 tcg_rnd, bool accumulate,
7998                                     bool is_u, int size, int shift)
7999 {
8000     bool extended_result = false;
8001     bool round = tcg_rnd != NULL;
8002     int ext_lshift = 0;
8003     TCGv_i64 tcg_src_hi;
8004 
8005     if (round && size == 3) {
8006         extended_result = true;
8007         ext_lshift = 64 - shift;
8008         tcg_src_hi = tcg_temp_new_i64();
8009     } else if (shift == 64) {
8010         if (!accumulate && is_u) {
8011             /* result is zero */
8012             tcg_gen_movi_i64(tcg_res, 0);
8013             return;
8014         }
8015     }
8016 
8017     /* Deal with the rounding step */
8018     if (round) {
8019         if (extended_result) {
8020             TCGv_i64 tcg_zero = tcg_const_i64(0);
8021             if (!is_u) {
8022                 /* take care of sign extending tcg_res */
8023                 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8024                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8025                                  tcg_src, tcg_src_hi,
8026                                  tcg_rnd, tcg_zero);
8027             } else {
8028                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8029                                  tcg_src, tcg_zero,
8030                                  tcg_rnd, tcg_zero);
8031             }
8032             tcg_temp_free_i64(tcg_zero);
8033         } else {
8034             tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8035         }
8036     }
8037 
8038     /* Now do the shift right */
8039     if (round && extended_result) {
8040         /* extended case, >64 bit precision required */
8041         if (ext_lshift == 0) {
8042             /* special case, only high bits matter */
8043             tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8044         } else {
8045             tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8046             tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8047             tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8048         }
8049     } else {
8050         if (is_u) {
8051             if (shift == 64) {
8052                 /* essentially shifting in 64 zeros */
8053                 tcg_gen_movi_i64(tcg_src, 0);
8054             } else {
8055                 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8056             }
8057         } else {
8058             if (shift == 64) {
8059                 /* effectively extending the sign-bit */
8060                 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8061             } else {
8062                 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8063             }
8064         }
8065     }
8066 
8067     if (accumulate) {
8068         tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8069     } else {
8070         tcg_gen_mov_i64(tcg_res, tcg_src);
8071     }
8072 
8073     if (extended_result) {
8074         tcg_temp_free_i64(tcg_src_hi);
8075     }
8076 }
8077 
8078 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8079 static void handle_scalar_simd_shri(DisasContext *s,
8080                                     bool is_u, int immh, int immb,
8081                                     int opcode, int rn, int rd)
8082 {
8083     const int size = 3;
8084     int immhb = immh << 3 | immb;
8085     int shift = 2 * (8 << size) - immhb;
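    /* size is fixed at 3 here, so shift == 128 - immhb, i.e. 1..64 */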
8086     bool accumulate = false;
8087     bool round = false;
8088     bool insert = false;
8089     TCGv_i64 tcg_rn;
8090     TCGv_i64 tcg_rd;
8091     TCGv_i64 tcg_round;
8092 
8093     if (!extract32(immh, 3, 1)) {
8094         unallocated_encoding(s);
8095         return;
8096     }
8097 
8098     if (!fp_access_check(s)) {
8099         return;
8100     }
8101 
8102     switch (opcode) {
8103     case 0x02: /* SSRA / USRA (accumulate) */
8104         accumulate = true;
8105         break;
8106     case 0x04: /* SRSHR / URSHR (rounding) */
8107         round = true;
8108         break;
8109     case 0x06: /* SRSRA / URSRA (accum + rounding) */
8110         accumulate = round = true;
8111         break;
8112     case 0x08: /* SRI */
8113         insert = true;
8114         break;
8115     }
8116 
8117     if (round) {
8118         uint64_t round_const = 1ULL << (shift - 1);
8119         tcg_round = tcg_const_i64(round_const);
8120     } else {
8121         tcg_round = NULL;
8122     }
8123 
8124     tcg_rn = read_fp_dreg(s, rn);
8125     tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8126 
8127     if (insert) {
8128         /* shift count same as element size is valid but does nothing;
8129          * special case to avoid potential shift by 64.
8130          */
8131         int esize = 8 << size;
8132         if (shift != esize) {
8133             tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8134             tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8135         }
8136     } else {
8137         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8138                                 accumulate, is_u, size, shift);
8139     }
8140 
8141     write_fp_dreg(s, rd, tcg_rd);
8142 
8143     tcg_temp_free_i64(tcg_rn);
8144     tcg_temp_free_i64(tcg_rd);
8145     if (round) {
8146         tcg_temp_free_i64(tcg_round);
8147     }
8148 }
8149 
8150 /* SHL/SLI - Scalar shift left */
8151 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8152                                     int immh, int immb, int opcode,
8153                                     int rn, int rd)
8154 {
8155     int size = 32 - clz32(immh) - 1;
8156     int immhb = immh << 3 | immb;
8157     int shift = immhb - (8 << size);
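    /*
     * immh's top set bit determines size, so immhb >= 8 << size and
     * shift is the left-shift amount in the range 0..esize-1.
     */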
8158     TCGv_i64 tcg_rn;
8159     TCGv_i64 tcg_rd;
8160 
8161     if (!extract32(immh, 3, 1)) {
8162         unallocated_encoding(s);
8163         return;
8164     }
8165 
8166     if (!fp_access_check(s)) {
8167         return;
8168     }
8169 
8170     tcg_rn = read_fp_dreg(s, rn);
8171     tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8172 
8173     if (insert) {
8174         tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8175     } else {
8176         tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8177     }
8178 
8179     write_fp_dreg(s, rd, tcg_rd);
8180 
8181     tcg_temp_free_i64(tcg_rn);
8182     tcg_temp_free_i64(tcg_rd);
8183 }
8184 
8185 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8186  * (signed/unsigned) narrowing */
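/*
 * e.g. SQSHRN with esize == 16 narrows each 32-bit source element to
 * 16 bits after the shift, saturating to [-0x8000, 0x7fff]; SQSHRUN
 * instead saturates the signed input to the unsigned range [0, 0xffff].
 */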
8187 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8188                                    bool is_u_shift, bool is_u_narrow,
8189                                    int immh, int immb, int opcode,
8190                                    int rn, int rd)
8191 {
8192     int immhb = immh << 3 | immb;
8193     int size = 32 - clz32(immh) - 1;
8194     int esize = 8 << size;
8195     int shift = (2 * esize) - immhb;
8196     int elements = is_scalar ? 1 : (64 / esize);
8197     bool round = extract32(opcode, 0, 1);
8198     MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8199     TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8200     TCGv_i32 tcg_rd_narrowed;
8201     TCGv_i64 tcg_final;
8202 
8203     static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8204         { gen_helper_neon_narrow_sat_s8,
8205           gen_helper_neon_unarrow_sat8 },
8206         { gen_helper_neon_narrow_sat_s16,
8207           gen_helper_neon_unarrow_sat16 },
8208         { gen_helper_neon_narrow_sat_s32,
8209           gen_helper_neon_unarrow_sat32 },
8210         { NULL, NULL },
8211     };
8212     static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8213         gen_helper_neon_narrow_sat_u8,
8214         gen_helper_neon_narrow_sat_u16,
8215         gen_helper_neon_narrow_sat_u32,
8216         NULL
8217     };
8218     NeonGenNarrowEnvFn *narrowfn;
8219 
8220     int i;
8221 
8222     assert(size < 4);
8223 
8224     if (extract32(immh, 3, 1)) {
8225         unallocated_encoding(s);
8226         return;
8227     }
8228 
8229     if (!fp_access_check(s)) {
8230         return;
8231     }
8232 
8233     if (is_u_shift) {
8234         narrowfn = unsigned_narrow_fns[size];
8235     } else {
8236         narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8237     }
8238 
8239     tcg_rn = tcg_temp_new_i64();
8240     tcg_rd = tcg_temp_new_i64();
8241     tcg_rd_narrowed = tcg_temp_new_i32();
8242     tcg_final = tcg_const_i64(0);
8243 
8244     if (round) {
8245         uint64_t round_const = 1ULL << (shift - 1);
8246         tcg_round = tcg_const_i64(round_const);
8247     } else {
8248         tcg_round = NULL;
8249     }
8250 
8251     for (i = 0; i < elements; i++) {
8252         read_vec_element(s, tcg_rn, rn, i, ldop);
8253         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8254                                 false, is_u_shift, size+1, shift);
8255         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8256         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8257         tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8258     }
8259 
8260     if (!is_q) {
8261         write_vec_element(s, tcg_final, rd, 0, MO_64);
8262     } else {
8263         write_vec_element(s, tcg_final, rd, 1, MO_64);
8264     }
8265 
8266     if (round) {
8267         tcg_temp_free_i64(tcg_round);
8268     }
8269     tcg_temp_free_i64(tcg_rn);
8270     tcg_temp_free_i64(tcg_rd);
8271     tcg_temp_free_i32(tcg_rd_narrowed);
8272     tcg_temp_free_i64(tcg_final);
8273 
8274     clear_vec_high(s, is_q, rd);
8275 }
8276 
8277 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8278 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8279                              bool src_unsigned, bool dst_unsigned,
8280                              int immh, int immb, int rn, int rd)
8281 {
8282     int immhb = immh << 3 | immb;
8283     int size = 32 - clz32(immh) - 1;
8284     int shift = immhb - (8 << size);
8285     int pass;
8286 
8287     assert(immh != 0);
8288     assert(!(scalar && is_q));
8289 
8290     if (!scalar) {
8291         if (!is_q && extract32(immh, 3, 1)) {
8292             unallocated_encoding(s);
8293             return;
8294         }
8295 
8296         /* Since we use the variable-shift helpers we must
8297          * replicate the shift count into each element of
8298          * the tcg_shift value.
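         * e.g. size 0 with shift 3 becomes 0x03030303, so every
         * byte lane of the helper sees a shift count of 3.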
8299          */
8300         switch (size) {
8301         case 0:
8302             shift |= shift << 8;
8303             /* fall through */
8304         case 1:
8305             shift |= shift << 16;
8306             break;
8307         case 2:
8308         case 3:
8309             break;
8310         default:
8311             g_assert_not_reached();
8312         }
8313     }
8314 
8315     if (!fp_access_check(s)) {
8316         return;
8317     }
8318 
8319     if (size == 3) {
8320         TCGv_i64 tcg_shift = tcg_const_i64(shift);
8321         static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8322             { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8323             { NULL, gen_helper_neon_qshl_u64 },
8324         };
8325         NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8326         int maxpass = is_q ? 2 : 1;
8327 
8328         for (pass = 0; pass < maxpass; pass++) {
8329             TCGv_i64 tcg_op = tcg_temp_new_i64();
8330 
8331             read_vec_element(s, tcg_op, rn, pass, MO_64);
8332             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8333             write_vec_element(s, tcg_op, rd, pass, MO_64);
8334 
8335             tcg_temp_free_i64(tcg_op);
8336         }
8337         tcg_temp_free_i64(tcg_shift);
8338         clear_vec_high(s, is_q, rd);
8339     } else {
8340         TCGv_i32 tcg_shift = tcg_const_i32(shift);
8341         static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8342             {
8343                 { gen_helper_neon_qshl_s8,
8344                   gen_helper_neon_qshl_s16,
8345                   gen_helper_neon_qshl_s32 },
8346                 { gen_helper_neon_qshlu_s8,
8347                   gen_helper_neon_qshlu_s16,
8348                   gen_helper_neon_qshlu_s32 }
8349             }, {
8350                 { NULL, NULL, NULL },
8351                 { gen_helper_neon_qshl_u8,
8352                   gen_helper_neon_qshl_u16,
8353                   gen_helper_neon_qshl_u32 }
8354             }
8355         };
8356         NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8357         MemOp memop = scalar ? size : MO_32;
8358         int maxpass = scalar ? 1 : is_q ? 4 : 2;
8359 
8360         for (pass = 0; pass < maxpass; pass++) {
8361             TCGv_i32 tcg_op = tcg_temp_new_i32();
8362 
8363             read_vec_element_i32(s, tcg_op, rn, pass, memop);
8364             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8365             if (scalar) {
8366                 switch (size) {
8367                 case 0:
8368                     tcg_gen_ext8u_i32(tcg_op, tcg_op);
8369                     break;
8370                 case 1:
8371                     tcg_gen_ext16u_i32(tcg_op, tcg_op);
8372                     break;
8373                 case 2:
8374                     break;
8375                 default:
8376                     g_assert_not_reached();
8377                 }
8378                 write_fp_sreg(s, rd, tcg_op);
8379             } else {
8380                 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8381             }
8382 
8383             tcg_temp_free_i32(tcg_op);
8384         }
8385         tcg_temp_free_i32(tcg_shift);
8386 
8387         if (!scalar) {
8388             clear_vec_high(s, is_q, rd);
8389         }
8390     }
8391 }
8392 
8393 /* Common vector code for handling integer to FP conversion */
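/*
 * A non-zero fracbits requests a fixed-point source: the integer is
 * divided by 2^fracbits as part of the conversion, e.g. UCVTF with
 * fracbits == 8 turns 0x280 (640) into 2.5.
 */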
8394 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8395                                    int elements, int is_signed,
8396                                    int fracbits, int size)
8397 {
8398     TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
8399     TCGv_i32 tcg_shift = NULL;
8400 
8401     MemOp mop = size | (is_signed ? MO_SIGN : 0);
8402     int pass;
8403 
8404     if (fracbits || size == MO_64) {
8405         tcg_shift = tcg_const_i32(fracbits);
8406     }
8407 
8408     if (size == MO_64) {
8409         TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8410         TCGv_i64 tcg_double = tcg_temp_new_i64();
8411 
8412         for (pass = 0; pass < elements; pass++) {
8413             read_vec_element(s, tcg_int64, rn, pass, mop);
8414 
8415             if (is_signed) {
8416                 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8417                                      tcg_shift, tcg_fpst);
8418             } else {
8419                 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8420                                      tcg_shift, tcg_fpst);
8421             }
8422             if (elements == 1) {
8423                 write_fp_dreg(s, rd, tcg_double);
8424             } else {
8425                 write_vec_element(s, tcg_double, rd, pass, MO_64);
8426             }
8427         }
8428 
8429         tcg_temp_free_i64(tcg_int64);
8430         tcg_temp_free_i64(tcg_double);
8431 
8432     } else {
8433         TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8434         TCGv_i32 tcg_float = tcg_temp_new_i32();
8435 
8436         for (pass = 0; pass < elements; pass++) {
8437             read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8438 
8439             switch (size) {
8440             case MO_32:
8441                 if (fracbits) {
8442                     if (is_signed) {
8443                         gen_helper_vfp_sltos(tcg_float, tcg_int32,
8444                                              tcg_shift, tcg_fpst);
8445                     } else {
8446                         gen_helper_vfp_ultos(tcg_float, tcg_int32,
8447                                              tcg_shift, tcg_fpst);
8448                     }
8449                 } else {
8450                     if (is_signed) {
8451                         gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8452                     } else {
8453                         gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8454                     }
8455                 }
8456                 break;
8457             case MO_16:
8458                 if (fracbits) {
8459                     if (is_signed) {
8460                         gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8461                                              tcg_shift, tcg_fpst);
8462                     } else {
8463                         gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8464                                              tcg_shift, tcg_fpst);
8465                     }
8466                 } else {
8467                     if (is_signed) {
8468                         gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8469                     } else {
8470                         gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8471                     }
8472                 }
8473                 break;
8474             default:
8475                 g_assert_not_reached();
8476             }
8477 
8478             if (elements == 1) {
8479                 write_fp_sreg(s, rd, tcg_float);
8480             } else {
8481                 write_vec_element_i32(s, tcg_float, rd, pass, size);
8482             }
8483         }
8484 
8485         tcg_temp_free_i32(tcg_int32);
8486         tcg_temp_free_i32(tcg_float);
8487     }
8488 
8489     tcg_temp_free_ptr(tcg_fpst);
8490     if (tcg_shift) {
8491         tcg_temp_free_i32(tcg_shift);
8492     }
8493 
8494     clear_vec_high(s, (elements << size) == 16, rd);
8495 }
8496 
8497 /* UCVTF/SCVTF - Integer to FP conversion */
8498 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8499                                          bool is_q, bool is_u,
8500                                          int immh, int immb, int opcode,
8501                                          int rn, int rd)
8502 {
8503     int size, elements, fracbits;
8504     int immhb = immh << 3 | immb;
8505 
8506     if (immh & 8) {
8507         size = MO_64;
8508         if (!is_scalar && !is_q) {
8509             unallocated_encoding(s);
8510             return;
8511         }
8512     } else if (immh & 4) {
8513         size = MO_32;
8514     } else if (immh & 2) {
8515         size = MO_16;
8516         if (!dc_isar_feature(aa64_fp16, s)) {
8517             unallocated_encoding(s);
8518             return;
8519         }
8520     } else {
8521         /* immh == 0 would be a failure of the decode logic */
8522         g_assert(immh == 1);
8523         unallocated_encoding(s);
8524         return;
8525     }
8526 
8527     if (is_scalar) {
8528         elements = 1;
8529     } else {
8530         elements = (8 << is_q) >> size;
8531     }
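    /*
     * The encoded shift is the number of fraction bits: 2 * esize - immhb,
     * e.g. for size == MO_32 this is 64 - immhb, giving 1..32.
     */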
8532     fracbits = (16 << size) - immhb;
8533 
8534     if (!fp_access_check(s)) {
8535         return;
8536     }
8537 
8538     handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8539 }
8540 
8541 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8542 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8543                                          bool is_q, bool is_u,
8544                                          int immh, int immb, int rn, int rd)
8545 {
8546     int immhb = immh << 3 | immb;
8547     int pass, size, fracbits;
8548     TCGv_ptr tcg_fpstatus;
8549     TCGv_i32 tcg_rmode, tcg_shift;
8550 
8551     if (immh & 0x8) {
8552         size = MO_64;
8553         if (!is_scalar && !is_q) {
8554             unallocated_encoding(s);
8555             return;
8556         }
8557     } else if (immh & 0x4) {
8558         size = MO_32;
8559     } else if (immh & 0x2) {
8560         size = MO_16;
8561         if (!dc_isar_feature(aa64_fp16, s)) {
8562             unallocated_encoding(s);
8563             return;
8564         }
8565     } else {
8566         /* Should have split out AdvSIMD modified immediate earlier.  */
8567         assert(immh == 1);
8568         unallocated_encoding(s);
8569         return;
8570     }
8571 
8572     if (!fp_access_check(s)) {
8573         return;
8574     }
8575 
8576     assert(!(is_scalar && is_q));
8577 
8578     tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
8579     tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
8580     gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
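    /*
     * The conversion scales by 2^fracbits and truncates towards zero,
     * e.g. 2.5 with fracbits == 8 converts to 640 (0x280).
     */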
8581     fracbits = (16 << size) - immhb;
8582     tcg_shift = tcg_const_i32(fracbits);
8583 
8584     if (size == MO_64) {
8585         int maxpass = is_scalar ? 1 : 2;
8586 
8587         for (pass = 0; pass < maxpass; pass++) {
8588             TCGv_i64 tcg_op = tcg_temp_new_i64();
8589 
8590             read_vec_element(s, tcg_op, rn, pass, MO_64);
8591             if (is_u) {
8592                 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8593             } else {
8594                 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8595             }
8596             write_vec_element(s, tcg_op, rd, pass, MO_64);
8597             tcg_temp_free_i64(tcg_op);
8598         }
8599         clear_vec_high(s, is_q, rd);
8600     } else {
8601         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
8602         int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
8603 
8604         switch (size) {
8605         case MO_16:
8606             if (is_u) {
8607                 fn = gen_helper_vfp_touhh;
8608             } else {
8609                 fn = gen_helper_vfp_toshh;
8610             }
8611             break;
8612         case MO_32:
8613             if (is_u) {
8614                 fn = gen_helper_vfp_touls;
8615             } else {
8616                 fn = gen_helper_vfp_tosls;
8617             }
8618             break;
8619         default:
8620             g_assert_not_reached();
8621         }
8622 
8623         for (pass = 0; pass < maxpass; pass++) {
8624             TCGv_i32 tcg_op = tcg_temp_new_i32();
8625 
8626             read_vec_element_i32(s, tcg_op, rn, pass, size);
8627             fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8628             if (is_scalar) {
8629                 write_fp_sreg(s, rd, tcg_op);
8630             } else {
8631                 write_vec_element_i32(s, tcg_op, rd, pass, size);
8632             }
8633             tcg_temp_free_i32(tcg_op);
8634         }
8635         if (!is_scalar) {
8636             clear_vec_high(s, is_q, rd);
8637         }
8638     }
8639 
8640     tcg_temp_free_ptr(tcg_fpstatus);
8641     tcg_temp_free_i32(tcg_shift);
8642     gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
8643     tcg_temp_free_i32(tcg_rmode);
8644 }
8645 
8646 /* AdvSIMD scalar shift by immediate
8647  *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
8648  * +-----+---+-------------+------+------+--------+---+------+------+
8649  * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
8650  * +-----+---+-------------+------+------+--------+---+------+------+
8651  *
8652  * This is the scalar version, so it works on fixed-size registers.
8653  */
8654 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
8655 {
8656     int rd = extract32(insn, 0, 5);
8657     int rn = extract32(insn, 5, 5);
8658     int opcode = extract32(insn, 11, 5);
8659     int immb = extract32(insn, 16, 3);
8660     int immh = extract32(insn, 19, 4);
8661     bool is_u = extract32(insn, 29, 1);
8662 
8663     if (immh == 0) {
8664         unallocated_encoding(s);
8665         return;
8666     }
8667 
8668     switch (opcode) {
8669     case 0x08: /* SRI */
8670         if (!is_u) {
8671             unallocated_encoding(s);
8672             return;
8673         }
8674         /* fall through */
8675     case 0x00: /* SSHR / USHR */
8676     case 0x02: /* SSRA / USRA */
8677     case 0x04: /* SRSHR / URSHR */
8678     case 0x06: /* SRSRA / URSRA */
8679         handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
8680         break;
8681     case 0x0a: /* SHL / SLI */
8682         handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
8683         break;
8684     case 0x1c: /* SCVTF, UCVTF */
8685         handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
8686                                      opcode, rn, rd);
8687         break;
8688     case 0x10: /* SQSHRUN, SQSHRUN2 */
8689     case 0x11: /* SQRSHRUN, SQRSHRUN2 */
8690         if (!is_u) {
8691             unallocated_encoding(s);
8692             return;
8693         }
8694         handle_vec_simd_sqshrn(s, true, false, false, true,
8695                                immh, immb, opcode, rn, rd);
8696         break;
8697     case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
8698     case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
8699         handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
8700                                immh, immb, opcode, rn, rd);
8701         break;
8702     case 0xc: /* SQSHLU */
8703         if (!is_u) {
8704             unallocated_encoding(s);
8705             return;
8706         }
8707         handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
8708         break;
8709     case 0xe: /* SQSHL, UQSHL */
8710         handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
8711         break;
8712     case 0x1f: /* FCVTZS, FCVTZU */
8713         handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
8714         break;
8715     default:
8716         unallocated_encoding(s);
8717         break;
8718     }
8719 }
8720 
8721 /* AdvSIMD scalar three different
8722  *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
8723  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8724  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
8725  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8726  */
8727 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
8728 {
8729     bool is_u = extract32(insn, 29, 1);
8730     int size = extract32(insn, 22, 2);
8731     int opcode = extract32(insn, 12, 4);
8732     int rm = extract32(insn, 16, 5);
8733     int rn = extract32(insn, 5, 5);
8734     int rd = extract32(insn, 0, 5);
8735 
8736     if (is_u) {
8737         unallocated_encoding(s);
8738         return;
8739     }
8740 
8741     switch (opcode) {
8742     case 0x9: /* SQDMLAL, SQDMLAL2 */
8743     case 0xb: /* SQDMLSL, SQDMLSL2 */
8744     case 0xd: /* SQDMULL, SQDMULL2 */
8745         if (size == 0 || size == 3) {
8746             unallocated_encoding(s);
8747             return;
8748         }
8749         break;
8750     default:
8751         unallocated_encoding(s);
8752         return;
8753     }
8754 
8755     if (!fp_access_check(s)) {
8756         return;
8757     }
8758 
8759     if (size == 2) {
8760         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8761         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8762         TCGv_i64 tcg_res = tcg_temp_new_i64();
8763 
8764         read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
8765         read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
8766 
8767         tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
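        /*
         * The doubling required by SQDMULL is done as a saturating add
         * of the product with itself; only INT32_MIN * INT32_MIN can
         * overflow here (2 * 2^62 exceeds INT64_MAX) and saturates.
         */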
8768         gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
8769 
8770         switch (opcode) {
8771         case 0xd: /* SQDMULL, SQDMULL2 */
8772             break;
8773         case 0xb: /* SQDMLSL, SQDMLSL2 */
8774             tcg_gen_neg_i64(tcg_res, tcg_res);
8775             /* fall through */
8776         case 0x9: /* SQDMLAL, SQDMLAL2 */
8777             read_vec_element(s, tcg_op1, rd, 0, MO_64);
8778             gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
8779                                               tcg_res, tcg_op1);
8780             break;
8781         default:
8782             g_assert_not_reached();
8783         }
8784 
8785         write_fp_dreg(s, rd, tcg_res);
8786 
8787         tcg_temp_free_i64(tcg_op1);
8788         tcg_temp_free_i64(tcg_op2);
8789         tcg_temp_free_i64(tcg_res);
8790     } else {
8791         TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8792         TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8793         TCGv_i64 tcg_res = tcg_temp_new_i64();
8794 
8795         gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8796         gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8797 
8798         switch (opcode) {
8799         case 0xd: /* SQDMULL, SQDMULL2 */
8800             break;
8801         case 0xb: /* SQDMLSL, SQDMLSL2 */
8802             gen_helper_neon_negl_u32(tcg_res, tcg_res);
8803             /* fall through */
8804         case 0x9: /* SQDMLAL, SQDMLAL2 */
8805         {
8806             TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8807             read_vec_element(s, tcg_op3, rd, 0, MO_32);
8808             gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8809                                               tcg_res, tcg_op3);
8810             tcg_temp_free_i64(tcg_op3);
8811             break;
8812         }
8813         default:
8814             g_assert_not_reached();
8815         }
8816 
8817         tcg_gen_ext32u_i64(tcg_res, tcg_res);
8818         write_fp_dreg(s, rd, tcg_res);
8819 
8820         tcg_temp_free_i32(tcg_op1);
8821         tcg_temp_free_i32(tcg_op2);
8822         tcg_temp_free_i64(tcg_res);
8823     }
8824 }
8825 
8826 static void handle_3same_64(DisasContext *s, int opcode, bool u,
8827                             TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8828 {
8829     /* Handle 64x64->64 opcodes which are shared between the scalar
8830      * and vector 3-same groups. We cover every opcode where size == 3
8831      * is valid in either the three-reg-same (integer, not pairwise)
8832      * or scalar-three-reg-same groups.
8833      */
8834     TCGCond cond;
8835 
8836     switch (opcode) {
8837     case 0x1: /* SQADD */
8838         if (u) {
8839             gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8840         } else {
8841             gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8842         }
8843         break;
8844     case 0x5: /* SQSUB */
8845         if (u) {
8846             gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8847         } else {
8848             gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8849         }
8850         break;
8851     case 0x6: /* CMGT, CMHI */
8852         /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8853          * We implement this using setcond (test) and then negating.
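     * (setcond yields 0 or 1; negating 1 gives the all-ones pattern.)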
8854          */
8855         cond = u ? TCG_COND_GTU : TCG_COND_GT;
8856     do_cmop:
8857         tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8858         tcg_gen_neg_i64(tcg_rd, tcg_rd);
8859         break;
8860     case 0x7: /* CMGE, CMHS */
8861         cond = u ? TCG_COND_GEU : TCG_COND_GE;
8862         goto do_cmop;
8863     case 0x11: /* CMTST, CMEQ */
8864         if (u) {
8865             cond = TCG_COND_EQ;
8866             goto do_cmop;
8867         }
8868         gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8869         break;
8870     case 0x8: /* SSHL, USHL */
8871         if (u) {
8872             gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
8873         } else {
8874             gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
8875         }
8876         break;
8877     case 0x9: /* SQSHL, UQSHL */
8878         if (u) {
8879             gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8880         } else {
8881             gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8882         }
8883         break;
8884     case 0xa: /* SRSHL, URSHL */
8885         if (u) {
8886             gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8887         } else {
8888             gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8889         }
8890         break;
8891     case 0xb: /* SQRSHL, UQRSHL */
8892         if (u) {
8893             gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8894         } else {
8895             gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8896         }
8897         break;
8898     case 0x10: /* ADD, SUB */
8899         if (u) {
8900             tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8901         } else {
8902             tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8903         }
8904         break;
8905     default:
8906         g_assert_not_reached();
8907     }
8908 }
8909 
8910 /* Handle the 3-same-operands float operations; shared by the scalar
8911  * and vector encodings. The caller must filter out any encodings
8912  * not allocated for the encoding it is dealing with.
8913  */
8914 static void handle_3same_float(DisasContext *s, int size, int elements,
8915                                int fpopcode, int rd, int rn, int rm)
8916 {
8917     int pass;
8918     TCGv_ptr fpst = get_fpstatus_ptr(false);
8919 
8920     for (pass = 0; pass < elements; pass++) {
8921         if (size) {
8922             /* Double */
8923             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8924             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8925             TCGv_i64 tcg_res = tcg_temp_new_i64();
8926 
8927             read_vec_element(s, tcg_op1, rn, pass, MO_64);
8928             read_vec_element(s, tcg_op2, rm, pass, MO_64);
8929 
8930             switch (fpopcode) {
8931             case 0x39: /* FMLS */
8932                 /* As usual for ARM, separate negation for fused multiply-add */
8933                 gen_helper_vfp_negd(tcg_op1, tcg_op1);
8934                 /* fall through */
8935             case 0x19: /* FMLA */
8936                 read_vec_element(s, tcg_res, rd, pass, MO_64);
8937                 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8938                                        tcg_res, fpst);
8939                 break;
8940             case 0x18: /* FMAXNM */
8941                 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8942                 break;
8943             case 0x1a: /* FADD */
8944                 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8945                 break;
8946             case 0x1b: /* FMULX */
8947                 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8948                 break;
8949             case 0x1c: /* FCMEQ */
8950                 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8951                 break;
8952             case 0x1e: /* FMAX */
8953                 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8954                 break;
8955             case 0x1f: /* FRECPS */
8956                 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8957                 break;
8958             case 0x38: /* FMINNM */
8959                 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8960                 break;
8961             case 0x3a: /* FSUB */
8962                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8963                 break;
8964             case 0x3e: /* FMIN */
8965                 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8966                 break;
8967             case 0x3f: /* FRSQRTS */
8968                 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8969                 break;
8970             case 0x5b: /* FMUL */
8971                 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8972                 break;
8973             case 0x5c: /* FCMGE */
8974                 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8975                 break;
8976             case 0x5d: /* FACGE */
8977                 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8978                 break;
8979             case 0x5f: /* FDIV */
8980                 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8981                 break;
8982             case 0x7a: /* FABD */
8983                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8984                 gen_helper_vfp_absd(tcg_res, tcg_res);
8985                 break;
8986             case 0x7c: /* FCMGT */
8987                 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8988                 break;
8989             case 0x7d: /* FACGT */
8990                 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8991                 break;
8992             default:
8993                 g_assert_not_reached();
8994             }
8995 
8996             write_vec_element(s, tcg_res, rd, pass, MO_64);
8997 
8998             tcg_temp_free_i64(tcg_res);
8999             tcg_temp_free_i64(tcg_op1);
9000             tcg_temp_free_i64(tcg_op2);
9001         } else {
9002             /* Single */
9003             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9004             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9005             TCGv_i32 tcg_res = tcg_temp_new_i32();
9006 
9007             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9008             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9009 
9010             switch (fpopcode) {
9011             case 0x39: /* FMLS */
9012                 /* As usual for ARM, separate negation for fused multiply-add */
9013                 gen_helper_vfp_negs(tcg_op1, tcg_op1);
9014                 /* fall through */
9015             case 0x19: /* FMLA */
9016                 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9017                 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
9018                                        tcg_res, fpst);
9019                 break;
9020             case 0x1a: /* FADD */
9021                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
9022                 break;
9023             case 0x1b: /* FMULX */
9024                 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
9025                 break;
9026             case 0x1c: /* FCMEQ */
9027                 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9028                 break;
9029             case 0x1e: /* FMAX */
9030                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
9031                 break;
9032             case 0x1f: /* FRECPS */
9033                 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9034                 break;
9035             case 0x18: /* FMAXNM */
9036                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
9037                 break;
9038             case 0x38: /* FMINNM */
9039                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
9040                 break;
9041             case 0x3a: /* FSUB */
9042                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9043                 break;
9044             case 0x3e: /* FMIN */
9045                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
9046                 break;
9047             case 0x3f: /* FRSQRTS */
9048                 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9049                 break;
9050             case 0x5b: /* FMUL */
9051                 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
9052                 break;
9053             case 0x5c: /* FCMGE */
9054                 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9055                 break;
9056             case 0x5d: /* FACGE */
9057                 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9058                 break;
9059             case 0x5f: /* FDIV */
9060                 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
9061                 break;
9062             case 0x7a: /* FABD */
9063                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9064                 gen_helper_vfp_abss(tcg_res, tcg_res);
9065                 break;
9066             case 0x7c: /* FCMGT */
9067                 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9068                 break;
9069             case 0x7d: /* FACGT */
9070                 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9071                 break;
9072             default:
9073                 g_assert_not_reached();
9074             }
9075 
9076             if (elements == 1) {
9077                 /* scalar single so clear high part */
9078                 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9079 
9080                 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
9081                 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
9082                 tcg_temp_free_i64(tcg_tmp);
9083             } else {
9084                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9085             }
9086 
9087             tcg_temp_free_i32(tcg_res);
9088             tcg_temp_free_i32(tcg_op1);
9089             tcg_temp_free_i32(tcg_op2);
9090         }
9091     }
9092 
9093     tcg_temp_free_ptr(fpst);
9094 
9095     clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
9096 }
9097 
9098 /* AdvSIMD scalar three same
9099  *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
9100  * +-----+---+-----------+------+---+------+--------+---+------+------+
9101  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
9102  * +-----+---+-----------+------+---+------+--------+---+------+------+
9103  */
9104 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
9105 {
9106     int rd = extract32(insn, 0, 5);
9107     int rn = extract32(insn, 5, 5);
9108     int opcode = extract32(insn, 11, 5);
9109     int rm = extract32(insn, 16, 5);
9110     int size = extract32(insn, 22, 2);
9111     bool u = extract32(insn, 29, 1);
9112     TCGv_i64 tcg_rd;
9113 
9114     if (opcode >= 0x18) {
9115         /* Floating point: U, size[1] and opcode indicate operation */
9116         int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
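        /* e.g. FABD encodes opcode == 0x1a with u == 1 and size[1] == 1,
         * giving the 0x7a case below.
         */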
9117         switch (fpopcode) {
9118         case 0x1b: /* FMULX */
9119         case 0x1f: /* FRECPS */
9120         case 0x3f: /* FRSQRTS */
9121         case 0x5d: /* FACGE */
9122         case 0x7d: /* FACGT */
9123         case 0x1c: /* FCMEQ */
9124         case 0x5c: /* FCMGE */
9125         case 0x7c: /* FCMGT */
9126         case 0x7a: /* FABD */
9127             break;
9128         default:
9129             unallocated_encoding(s);
9130             return;
9131         }
9132 
9133         if (!fp_access_check(s)) {
9134             return;
9135         }
9136 
9137         handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
9138         return;
9139     }
9140 
9141     switch (opcode) {
9142     case 0x1: /* SQADD, UQADD */
9143     case 0x5: /* SQSUB, UQSUB */
9144     case 0x9: /* SQSHL, UQSHL */
9145     case 0xb: /* SQRSHL, UQRSHL */
9146         break;
9147     case 0x8: /* SSHL, USHL */
9148     case 0xa: /* SRSHL, URSHL */
9149     case 0x6: /* CMGT, CMHI */
9150     case 0x7: /* CMGE, CMHS */
9151     case 0x11: /* CMTST, CMEQ */
9152     case 0x10: /* ADD, SUB (vector) */
9153         if (size != 3) {
9154             unallocated_encoding(s);
9155             return;
9156         }
9157         break;
9158     case 0x16: /* SQDMULH, SQRDMULH (vector) */
9159         if (size != 1 && size != 2) {
9160             unallocated_encoding(s);
9161             return;
9162         }
9163         break;
9164     default:
9165         unallocated_encoding(s);
9166         return;
9167     }
9168 
9169     if (!fp_access_check(s)) {
9170         return;
9171     }
9172 
9173     tcg_rd = tcg_temp_new_i64();
9174 
9175     if (size == 3) {
9176         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9177         TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
9178 
9179         handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
9180         tcg_temp_free_i64(tcg_rn);
9181         tcg_temp_free_i64(tcg_rm);
9182     } else {
9183         /* Do a single operation on the lowest element in the vector.
9184          * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9185          * no side effects for all these operations.
9186          * OPTME: special-purpose helpers would avoid doing some
9187          * unnecessary work in the helper for the 8 and 16 bit cases.
9188          */
9189         NeonGenTwoOpEnvFn *genenvfn;
9190         TCGv_i32 tcg_rn = tcg_temp_new_i32();
9191         TCGv_i32 tcg_rm = tcg_temp_new_i32();
9192         TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
9193 
9194         read_vec_element_i32(s, tcg_rn, rn, 0, size);
9195         read_vec_element_i32(s, tcg_rm, rm, 0, size);
9196 
        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}

/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23  22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
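    /*
     * As in the single/double path, the decode fields are packed into a
     * single fpopcode ({U, a, opcode}) for a flat switch below.
     */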
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
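        /* FABD is |a - b|: clear bit 15, the float16 sign bit. */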
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_i32(tcg_res);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_ptr(fpst);
}

/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

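    /* Fold U into bit 4 so a single switch decodes both U=0 and U=1. */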
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}

static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement via setcond(test) and
         * then negating the 0/1 result.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
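        /*
         * The fixed-point conversion helpers take a shift count; a
         * shift of zero gives a plain FP-to-integer conversion.
         */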
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}

static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

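        /*
         * There are no dedicated LT/LE helpers: FCMLT and FCMLE swap
         * the operands into the GT/GE helpers instead.
         */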
        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}

static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}

static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = get_fpstatus_ptr(false);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}

/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

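            /*
             * USQADD adds a signed value to the unsigned accumulator in
             * Rd with unsigned saturation; SUQADD is the converse
             * (unsigned addend, signed accumulator).
             */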
            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}

/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
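        /*
         * From here on, size is an MO_* element size: MO_64 for the
         * double-precision forms, MO_32 for single.
         */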
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
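            /*
             * Opcode bits [5] and [0] encode the rounding mode, mapping
             * N/M/P/Z onto FPROUNDING_TIEEVEN/NEGINF/POSINF/ZERO.
             */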
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
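        /*
         * gen_helper_set_rmode installs the new rounding mode and hands
         * back the old one in tcg_rmode; the matching call at the end
         * of the function restores it.
         */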
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}

/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
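    /*
     * Right shifts are encoded as (2 * esize) - immhb, so valid shift
     * amounts run from 1 up to the element size itself.
     */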
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize / esize;
    MemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add.  */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Shift count same as element size produces all sign to add.  */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing.  */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero.  */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Shift count the same size as element size produces all sign.  */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

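    /*
     * For the rounding variants, adding 1 << (shift - 1) before the
     * shift implements the architected round-to-nearest behaviour.
     */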
    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}

/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
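    /* Left shifts encode the shift amount as immhb - esize: 0..esize-1. */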

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}

/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize / esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything up front and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

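    /*
     * Each pass moves element i into the low bits, sign- or
     * zero-extends it via ext_and_shift_reg (extend type = size, with
     * bit 2 set for the signed variants), then applies the left shift.
     */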
    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}

/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize / esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
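    /*
     * tcg_final is seeded from Rd and written back as a single 64-bit
     * half: the low half for SHRN, or the high half (leaving the low
     * half untouched) for the "2" variants.
     */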
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size + 1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size + 1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}

/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* Generate code to do a "long" addition or subtraction, i.e. one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}

static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

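            /*
             * is_q selects the second-part ("2" suffix) forms, which
             * take their inputs from the high half of the sources.
             */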
            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

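                /*
                 * Absolute difference: compute both a-b and b-a, then
                 * use movcond on a >= b to select the non-negative one.
                 */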
10549                 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10550                 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10551                 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10552                                     tcg_passres,
10553                                     tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10554                 tcg_temp_free_i64(tcg_tmp1);
10555                 tcg_temp_free_i64(tcg_tmp2);
10556                 break;
10557             }
10558             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10559             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10560             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10561                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10562                 break;
10563             case 9: /* SQDMLAL, SQDMLAL2 */
10564             case 11: /* SQDMLSL, SQDMLSL2 */
10565             case 13: /* SQDMULL, SQDMULL2 */
10566                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10567                 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10568                                                   tcg_passres, tcg_passres);
10569                 break;
10570             default:
10571                 g_assert_not_reached();
10572             }
10573 
10574             if (opcode == 9 || opcode == 11) {
10575                 /* saturating accumulate ops */
10576                 if (accop < 0) {
10577                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
10578                 }
10579                 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10580                                                   tcg_res[pass], tcg_passres);
10581             } else if (accop > 0) {
10582                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10583             } else if (accop < 0) {
10584                 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10585             }
10586 
10587             if (accop != 0) {
10588                 tcg_temp_free_i64(tcg_passres);
10589             }
10590 
10591             tcg_temp_free_i64(tcg_op1);
10592             tcg_temp_free_i64(tcg_op2);
10593         }
10594     } else {
10595         /* size 0 or 1, generally helper functions */
10596         for (pass = 0; pass < 2; pass++) {
10597             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10598             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10599             TCGv_i64 tcg_passres;
10600             int elt = pass + is_q * 2;
10601 
10602             read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10603             read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10604 
10605             if (accop == 0) {
10606                 tcg_passres = tcg_res[pass];
10607             } else {
10608                 tcg_passres = tcg_temp_new_i64();
10609             }
10610 
10611             switch (opcode) {
10612             case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10613             case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10614             {
10615                 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10616                 static NeonGenWidenFn * const widenfns[2][2] = {
10617                     { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10618                     { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10619                 };
10620                 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10621 
10622                 widenfn(tcg_op2_64, tcg_op2);
10623                 widenfn(tcg_passres, tcg_op1);
10624                 gen_neon_addl(size, (opcode == 2), tcg_passres,
10625                               tcg_passres, tcg_op2_64);
10626                 tcg_temp_free_i64(tcg_op2_64);
10627                 break;
10628             }
10629             case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10630             case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10631                 if (size == 0) {
10632                     if (is_u) {
10633                         gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10634                     } else {
10635                         gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10636                     }
10637                 } else {
10638                     if (is_u) {
10639                         gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10640                     } else {
10641                         gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10642                     }
10643                 }
10644                 break;
10645             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10646             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10647             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10648                 if (size == 0) {
10649                     if (is_u) {
10650                         gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10651                     } else {
10652                         gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10653                     }
10654                 } else {
10655                     if (is_u) {
10656                         gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10657                     } else {
10658                         gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10659                     }
10660                 }
10661                 break;
10662             case 9: /* SQDMLAL, SQDMLAL2 */
10663             case 11: /* SQDMLSL, SQDMLSL2 */
10664             case 13: /* SQDMULL, SQDMULL2 */
10665                 assert(size == 1);
10666                 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10667                 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10668                                                   tcg_passres, tcg_passres);
10669                 break;
10670             default:
10671                 g_assert_not_reached();
10672             }
10673             tcg_temp_free_i32(tcg_op1);
10674             tcg_temp_free_i32(tcg_op2);
10675 
10676             if (accop != 0) {
10677                 if (opcode == 9 || opcode == 11) {
10678                     /* saturating accumulate ops */
10679                     if (accop < 0) {
10680                         gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10681                     }
10682                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10683                                                       tcg_res[pass],
10684                                                       tcg_passres);
10685                 } else {
10686                     gen_neon_addl(size, (accop < 0), tcg_res[pass],
10687                                   tcg_res[pass], tcg_passres);
10688                 }
10689                 tcg_temp_free_i64(tcg_passres);
10690             }
10691         }
10692     }
10693 
10694     write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10695     write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10696     tcg_temp_free_i64(tcg_res[0]);
10697     tcg_temp_free_i64(tcg_res[1]);
10698 }
10699 
10700 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10701                             int opcode, int rd, int rn, int rm)
10702 {
10703     TCGv_i64 tcg_res[2];
10704     int part = is_q ? 2 : 0;
10705     int pass;
10706 
10707     for (pass = 0; pass < 2; pass++) {
10708         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10709         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10710         TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10711         static NeonGenWidenFn * const widenfns[3][2] = {
10712             { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10713             { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10714             { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10715         };
10716         NeonGenWidenFn *widenfn = widenfns[size][is_u];
10717 
10718         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10719         read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10720         widenfn(tcg_op2_wide, tcg_op2);
10721         tcg_temp_free_i32(tcg_op2);
10722         tcg_res[pass] = tcg_temp_new_i64();
10723         gen_neon_addl(size, (opcode == 3),
10724                       tcg_res[pass], tcg_op1, tcg_op2_wide);
10725         tcg_temp_free_i64(tcg_op1);
10726         tcg_temp_free_i64(tcg_op2_wide);
10727     }
10728 
10729     for (pass = 0; pass < 2; pass++) {
10730         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10731         tcg_temp_free_i64(tcg_res[pass]);
10732     }
10733 }
10734 
10735 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10736 {
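    /* Round by adding 2^31 before taking the high 32 bits.  Note that this
     * clobbers 'in'; the caller passes a temporary which it frees
     * immediately afterwards.
     */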
10737     tcg_gen_addi_i64(in, in, 1U << 31);
10738     tcg_gen_extrh_i64_i32(res, in);
10739 }
10740 
10741 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10742                                  int opcode, int rd, int rn, int rm)
10743 {
10744     TCGv_i32 tcg_res[2];
10745     int part = is_q ? 2 : 0;
10746     int pass;
10747 
10748     for (pass = 0; pass < 2; pass++) {
10749         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10750         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10751         TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10752         static NeonGenNarrowFn * const narrowfns[3][2] = {
10753             { gen_helper_neon_narrow_high_u8,
10754               gen_helper_neon_narrow_round_high_u8 },
10755             { gen_helper_neon_narrow_high_u16,
10756               gen_helper_neon_narrow_round_high_u16 },
10757             { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10758         };
10759         NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10760 
10761         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10762         read_vec_element(s, tcg_op2, rm, pass, MO_64);
10763 
10764         gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10765 
10766         tcg_temp_free_i64(tcg_op1);
10767         tcg_temp_free_i64(tcg_op2);
10768 
10769         tcg_res[pass] = tcg_temp_new_i32();
10770         gennarrow(tcg_res[pass], tcg_wideres);
10771         tcg_temp_free_i64(tcg_wideres);
10772     }
10773 
10774     for (pass = 0; pass < 2; pass++) {
10775         write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10776         tcg_temp_free_i32(tcg_res[pass]);
10777     }
10778     clear_vec_high(s, is_q, rd);
10779 }
10780 
10781 /* AdvSIMD three different
10782  *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
10783  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10784  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
10785  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10786  */
10787 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10788 {
10789     /* Instructions in this group fall into three basic classes
10790      * (in each case with the operation working on each element in
10791      * the input vectors):
10792      * (1) widening 64 x 64 -> 128 (possibly with Vd as an extra
10793      *     128-bit input)
10794      * (2) wide 64 x 128 -> 128
10795      * (3) narrowing 128 x 128 -> 64
10796      * Here we do initial decode, catch unallocated cases and
10797      * dispatch to separate functions for each class.
10798      */
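    /* Illustrative examples of the three classes:
     *   widening:  SADDL2 Vd.2D, Vn.4S, Vm.4S  (upper-half inputs)
     *   wide:      SADDW  Vd.2D, Vn.2D, Vm.2S
     *   narrowing: ADDHN  Vd.2S, Vn.2D, Vm.2D
     */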
10799     int is_q = extract32(insn, 30, 1);
10800     int is_u = extract32(insn, 29, 1);
10801     int size = extract32(insn, 22, 2);
10802     int opcode = extract32(insn, 12, 4);
10803     int rm = extract32(insn, 16, 5);
10804     int rn = extract32(insn, 5, 5);
10805     int rd = extract32(insn, 0, 5);
10806 
10807     switch (opcode) {
10808     case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10809     case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10810         /* 64 x 128 -> 128 */
10811         if (size == 3) {
10812             unallocated_encoding(s);
10813             return;
10814         }
10815         if (!fp_access_check(s)) {
10816             return;
10817         }
10818         handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10819         break;
10820     case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10821     case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10822         /* 128 x 128 -> 64 */
10823         if (size == 3) {
10824             unallocated_encoding(s);
10825             return;
10826         }
10827         if (!fp_access_check(s)) {
10828             return;
10829         }
10830         handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10831         break;
10832     case 14: /* PMULL, PMULL2 */
10833         if (is_u) {
10834             unallocated_encoding(s);
10835             return;
10836         }
10837         switch (size) {
10838         case 0: /* PMULL.P8 */
10839             if (!fp_access_check(s)) {
10840                 return;
10841             }
10842             /* The Q field specifies lo/hi half input for this insn.  */
10843             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10844                              gen_helper_neon_pmull_h);
10845             break;
10846 
10847         case 3: /* PMULL.P64 */
10848             if (!dc_isar_feature(aa64_pmull, s)) {
10849                 unallocated_encoding(s);
10850                 return;
10851             }
10852             if (!fp_access_check(s)) {
10853                 return;
10854             }
10855             /* The Q field specifies lo/hi half input for this insn.  */
10856             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10857                              gen_helper_gvec_pmull_q);
10858             break;
10859 
10860         default:
10861             unallocated_encoding(s);
10862             break;
10863         }
10864         return;
10865     case 9: /* SQDMLAL, SQDMLAL2 */
10866     case 11: /* SQDMLSL, SQDMLSL2 */
10867     case 13: /* SQDMULL, SQDMULL2 */
10868         if (is_u || size == 0) {
10869             unallocated_encoding(s);
10870             return;
10871         }
10872         /* fall through */
10873     case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10874     case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10875     case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10876     case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10877     case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10878     case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10879     case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10880         /* 64 x 64 -> 128 */
10881         if (size == 3) {
10882             unallocated_encoding(s);
10883             return;
10884         }
10885         if (!fp_access_check(s)) {
10886             return;
10887         }
10888 
10889         handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10890         break;
10891     default:
10892         /* opcode 15 not allocated */
10893         unallocated_encoding(s);
10894         break;
10895     }
10896 }
10897 
10898 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10899 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10900 {
10901     int rd = extract32(insn, 0, 5);
10902     int rn = extract32(insn, 5, 5);
10903     int rm = extract32(insn, 16, 5);
10904     int size = extract32(insn, 22, 2);
10905     bool is_u = extract32(insn, 29, 1);
10906     bool is_q = extract32(insn, 30, 1);
10907 
10908     if (!fp_access_check(s)) {
10909         return;
10910     }
10911 
10912     switch (size + 4 * is_u) {
10913     case 0: /* AND */
10914         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10915         return;
10916     case 1: /* BIC */
10917         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10918         return;
10919     case 2: /* ORR */
10920         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10921         return;
10922     case 3: /* ORN */
10923         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10924         return;
10925     case 4: /* EOR */
10926         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10927         return;
10928 
10929     case 5: /* BSL bitwise select */
10930         gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
10931         return;
10932     case 6: /* BIT, bitwise insert if true */
10933         gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
10934         return;
10935     case 7: /* BIF, bitwise insert if false */
10936         gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
10937         return;
10938 
10939     default:
10940         g_assert_not_reached();
10941     }
10942 }
10943 
10944 /* Pairwise op subgroup of C3.6.16.
10945  *
10946  * This is called directly for integer ops, or from disas_simd_3same_float
10947  * for float pairwise ops, where the opcode and size are calculated differently.
10948  */
10949 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10950                                    int size, int rn, int rm, int rd)
10951 {
10952     TCGv_ptr fpst;
10953     int pass;
10954 
10955     if (!fp_access_check(s)) {
10956         return;
10957     }
10958 
10959     /* Floating point operations need fpst */
10960     if (opcode >= 0x58) {
10961         fpst = get_fpstatus_ptr(false);
10962     } else {
10963         fpst = NULL;
10964     }
10965 
10966     /* These operations work on the concatenated rm:rn, with each pair of
10967      * adjacent elements being operated on to produce an element in the result.
10968      */
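    /* e.g. ADDP Vd.4S, Vn.4S, Vm.4S computes
     *   d[0] = n[0] + n[1], d[1] = n[2] + n[3],
     *   d[2] = m[0] + m[1], d[3] = m[2] + m[3]
     */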
10969     if (size == 3) {
10970         TCGv_i64 tcg_res[2];
10971 
10972         for (pass = 0; pass < 2; pass++) {
10973             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10974             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10975             int passreg = (pass == 0) ? rn : rm;
10976 
10977             read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10978             read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10979             tcg_res[pass] = tcg_temp_new_i64();
10980 
10981             switch (opcode) {
10982             case 0x17: /* ADDP */
10983                 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10984                 break;
10985             case 0x58: /* FMAXNMP */
10986                 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10987                 break;
10988             case 0x5a: /* FADDP */
10989                 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10990                 break;
10991             case 0x5e: /* FMAXP */
10992                 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10993                 break;
10994             case 0x78: /* FMINNMP */
10995                 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10996                 break;
10997             case 0x7e: /* FMINP */
10998                 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10999                 break;
11000             default:
11001                 g_assert_not_reached();
11002             }
11003 
11004             tcg_temp_free_i64(tcg_op1);
11005             tcg_temp_free_i64(tcg_op2);
11006         }
11007 
11008         for (pass = 0; pass < 2; pass++) {
11009             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11010             tcg_temp_free_i64(tcg_res[pass]);
11011         }
11012     } else {
11013         int maxpass = is_q ? 4 : 2;
11014         TCGv_i32 tcg_res[4];
11015 
11016         for (pass = 0; pass < maxpass; pass++) {
11017             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11018             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11019             NeonGenTwoOpFn *genfn = NULL;
11020             int passreg = pass < (maxpass / 2) ? rn : rm;
11021             int passelt = (is_q && (pass & 1)) ? 2 : 0;
11022 
11023             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11024             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11025             tcg_res[pass] = tcg_temp_new_i32();
11026 
11027             switch (opcode) {
11028             case 0x17: /* ADDP */
11029             {
11030                 static NeonGenTwoOpFn * const fns[3] = {
11031                     gen_helper_neon_padd_u8,
11032                     gen_helper_neon_padd_u16,
11033                     tcg_gen_add_i32,
11034                 };
11035                 genfn = fns[size];
11036                 break;
11037             }
11038             case 0x14: /* SMAXP, UMAXP */
11039             {
11040                 static NeonGenTwoOpFn * const fns[3][2] = {
11041                     { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11042                     { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11043                     { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11044                 };
11045                 genfn = fns[size][u];
11046                 break;
11047             }
11048             case 0x15: /* SMINP, UMINP */
11049             {
11050                 static NeonGenTwoOpFn * const fns[3][2] = {
11051                     { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11052                     { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11053                     { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11054                 };
11055                 genfn = fns[size][u];
11056                 break;
11057             }
11058             /* The FP operations are all on single floats (32 bit) */
11059             case 0x58: /* FMAXNMP */
11060                 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11061                 break;
11062             case 0x5a: /* FADDP */
11063                 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11064                 break;
11065             case 0x5e: /* FMAXP */
11066                 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11067                 break;
11068             case 0x78: /* FMINNMP */
11069                 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11070                 break;
11071             case 0x7e: /* FMINP */
11072                 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11073                 break;
11074             default:
11075                 g_assert_not_reached();
11076             }
11077 
11078             /* FP ops were called directly above; integer ops call genfn here */
11079             if (genfn) {
11080                 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11081             }
11082 
11083             tcg_temp_free_i32(tcg_op1);
11084             tcg_temp_free_i32(tcg_op2);
11085         }
11086 
11087         for (pass = 0; pass < maxpass; pass++) {
11088             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11089             tcg_temp_free_i32(tcg_res[pass]);
11090         }
11091         clear_vec_high(s, is_q, rd);
11092     }
11093 
11094     if (fpst) {
11095         tcg_temp_free_ptr(fpst);
11096     }
11097 }
11098 
11099 /* Floating point op subgroup of C3.6.16. */
11100 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
11101 {
11102     /* For floating point ops, the U, size[1] and opcode bits
11103      * together indicate the operation. size[0] indicates single
11104      * or double.
11105      */
11106     int fpopcode = extract32(insn, 11, 5)
11107         | (extract32(insn, 23, 1) << 5)
11108         | (extract32(insn, 29, 1) << 6);
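    /* e.g. FADD (vector, single) has U=0 and opcode 0x1a, giving fpopcode
     * 0x1a; the same opcode with size[1] set selects FSUB (0x3a).
     */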
11109     int is_q = extract32(insn, 30, 1);
11110     int size = extract32(insn, 22, 1);
11111     int rm = extract32(insn, 16, 5);
11112     int rn = extract32(insn, 5, 5);
11113     int rd = extract32(insn, 0, 5);
11114 
11115     int datasize = is_q ? 128 : 64;
11116     int esize = 32 << size;
11117     int elements = datasize / esize;
11118 
11119     if (size == 1 && !is_q) {
11120         unallocated_encoding(s);
11121         return;
11122     }
11123 
11124     switch (fpopcode) {
11125     case 0x58: /* FMAXNMP */
11126     case 0x5a: /* FADDP */
11127     case 0x5e: /* FMAXP */
11128     case 0x78: /* FMINNMP */
11129     case 0x7e: /* FMINP */
11130         if (size && !is_q) {
11131             unallocated_encoding(s);
11132             return;
11133         }
11134         handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
11135                                rn, rm, rd);
11136         return;
11137     case 0x1b: /* FMULX */
11138     case 0x1f: /* FRECPS */
11139     case 0x3f: /* FRSQRTS */
11140     case 0x5d: /* FACGE */
11141     case 0x7d: /* FACGT */
11142     case 0x19: /* FMLA */
11143     case 0x39: /* FMLS */
11144     case 0x18: /* FMAXNM */
11145     case 0x1a: /* FADD */
11146     case 0x1c: /* FCMEQ */
11147     case 0x1e: /* FMAX */
11148     case 0x38: /* FMINNM */
11149     case 0x3a: /* FSUB */
11150     case 0x3e: /* FMIN */
11151     case 0x5b: /* FMUL */
11152     case 0x5c: /* FCMGE */
11153     case 0x5f: /* FDIV */
11154     case 0x7a: /* FABD */
11155     case 0x7c: /* FCMGT */
11156         if (!fp_access_check(s)) {
11157             return;
11158         }
11159         handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11160         return;
11161 
11162     case 0x1d: /* FMLAL  */
11163     case 0x3d: /* FMLSL  */
11164     case 0x59: /* FMLAL2 */
11165     case 0x79: /* FMLSL2 */
11166         if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11167             unallocated_encoding(s);
11168             return;
11169         }
11170         if (fp_access_check(s)) {
11171             int is_s = extract32(insn, 23, 1);
11172             int is_2 = extract32(insn, 29, 1);
11173             int data = (is_2 << 1) | is_s;
11174             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11175                                vec_full_reg_offset(s, rn),
11176                                vec_full_reg_offset(s, rm), cpu_env,
11177                                is_q ? 16 : 8, vec_full_reg_size(s),
11178                                data, gen_helper_gvec_fmlal_a64);
11179         }
11180         return;
11181 
11182     default:
11183         unallocated_encoding(s);
11184         return;
11185     }
11186 }
11187 
11188 /* Integer op subgroup of C3.6.16. */
11189 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11190 {
11191     int is_q = extract32(insn, 30, 1);
11192     int u = extract32(insn, 29, 1);
11193     int size = extract32(insn, 22, 2);
11194     int opcode = extract32(insn, 11, 5);
11195     int rm = extract32(insn, 16, 5);
11196     int rn = extract32(insn, 5, 5);
11197     int rd = extract32(insn, 0, 5);
11198     int pass;
11199     TCGCond cond;
11200 
11201     switch (opcode) {
11202     case 0x13: /* MUL, PMUL */
11203         if (u && size != 0) {
11204             unallocated_encoding(s);
11205             return;
11206         }
11207         /* fall through */
11208     case 0x0: /* SHADD, UHADD */
11209     case 0x2: /* SRHADD, URHADD */
11210     case 0x4: /* SHSUB, UHSUB */
11211     case 0xc: /* SMAX, UMAX */
11212     case 0xd: /* SMIN, UMIN */
11213     case 0xe: /* SABD, UABD */
11214     case 0xf: /* SABA, UABA */
11215     case 0x12: /* MLA, MLS */
11216         if (size == 3) {
11217             unallocated_encoding(s);
11218             return;
11219         }
11220         break;
11221     case 0x16: /* SQDMULH, SQRDMULH */
11222         if (size == 0 || size == 3) {
11223             unallocated_encoding(s);
11224             return;
11225         }
11226         break;
11227     default:
11228         if (size == 3 && !is_q) {
11229             unallocated_encoding(s);
11230             return;
11231         }
11232         break;
11233     }
11234 
11235     if (!fp_access_check(s)) {
11236         return;
11237     }
11238 
11239     switch (opcode) {
11240     case 0x01: /* SQADD, UQADD */
11241         tcg_gen_gvec_4(vec_full_reg_offset(s, rd),
11242                        offsetof(CPUARMState, vfp.qc),
11243                        vec_full_reg_offset(s, rn),
11244                        vec_full_reg_offset(s, rm),
11245                        is_q ? 16 : 8, vec_full_reg_size(s),
11246                        (u ? uqadd_op : sqadd_op) + size);
11247         return;
11248     case 0x05: /* SQSUB, UQSUB */
11249         tcg_gen_gvec_4(vec_full_reg_offset(s, rd),
11250                        offsetof(CPUARMState, vfp.qc),
11251                        vec_full_reg_offset(s, rn),
11252                        vec_full_reg_offset(s, rm),
11253                        is_q ? 16 : 8, vec_full_reg_size(s),
11254                        (u ? uqsub_op : sqsub_op) + size);
11255         return;
11256     case 0x08: /* SSHL, USHL */
11257         gen_gvec_op3(s, is_q, rd, rn, rm,
11258                      u ? &ushl_op[size] : &sshl_op[size]);
11259         return;
11260     case 0x0c: /* SMAX, UMAX */
11261         if (u) {
11262             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11263         } else {
11264             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11265         }
11266         return;
11267     case 0x0d: /* SMIN, UMIN */
11268         if (u) {
11269             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11270         } else {
11271             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11272         }
11273         return;
11274     case 0x10: /* ADD, SUB */
11275         if (u) {
11276             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11277         } else {
11278             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11279         }
11280         return;
11281     case 0x13: /* MUL, PMUL */
11282         if (!u) { /* MUL */
11283             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11284         } else {  /* PMUL */
11285             gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11286         }
11287         return;
11288     case 0x12: /* MLA, MLS */
11289         if (u) {
11290             gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
11291         } else {
11292             gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
11293         }
11294         return;
11295     case 0x11: /* CMTST, CMEQ */
11296         if (!u) { /* CMTST */
11297             gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
11298             return;
11299         }
11300         /* else CMEQ */
11301         cond = TCG_COND_EQ;
11302         goto do_gvec_cmp;
11303     case 0x06: /* CMGT, CMHI */
11304         cond = u ? TCG_COND_GTU : TCG_COND_GT;
11305         goto do_gvec_cmp;
11306     case 0x07: /* CMGE, CMHS */
11307         cond = u ? TCG_COND_GEU : TCG_COND_GE;
11308     do_gvec_cmp:
11309         tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11310                          vec_full_reg_offset(s, rn),
11311                          vec_full_reg_offset(s, rm),
11312                          is_q ? 16 : 8, vec_full_reg_size(s));
11313         return;
11314     }
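    /* The remaining opcodes have no whole-vector expansion; fall through
     * to the per-element loops below.
     */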
11315 
11316     if (size == 3) {
11317         assert(is_q);
11318         for (pass = 0; pass < 2; pass++) {
11319             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11320             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11321             TCGv_i64 tcg_res = tcg_temp_new_i64();
11322 
11323             read_vec_element(s, tcg_op1, rn, pass, MO_64);
11324             read_vec_element(s, tcg_op2, rm, pass, MO_64);
11325 
11326             handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11327 
11328             write_vec_element(s, tcg_res, rd, pass, MO_64);
11329 
11330             tcg_temp_free_i64(tcg_res);
11331             tcg_temp_free_i64(tcg_op1);
11332             tcg_temp_free_i64(tcg_op2);
11333         }
11334     } else {
11335         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11336             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11337             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11338             TCGv_i32 tcg_res = tcg_temp_new_i32();
11339             NeonGenTwoOpFn *genfn = NULL;
11340             NeonGenTwoOpEnvFn *genenvfn = NULL;
11341 
11342             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11343             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11344 
11345             switch (opcode) {
11346             case 0x0: /* SHADD, UHADD */
11347             {
11348                 static NeonGenTwoOpFn * const fns[3][2] = {
11349                     { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11350                     { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11351                     { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11352                 };
11353                 genfn = fns[size][u];
11354                 break;
11355             }
11356             case 0x2: /* SRHADD, URHADD */
11357             {
11358                 static NeonGenTwoOpFn * const fns[3][2] = {
11359                     { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11360                     { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11361                     { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11362                 };
11363                 genfn = fns[size][u];
11364                 break;
11365             }
11366             case 0x4: /* SHSUB, UHSUB */
11367             {
11368                 static NeonGenTwoOpFn * const fns[3][2] = {
11369                     { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11370                     { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11371                     { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11372                 };
11373                 genfn = fns[size][u];
11374                 break;
11375             }
11376             case 0x9: /* SQSHL, UQSHL */
11377             {
11378                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11379                     { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11380                     { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11381                     { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11382                 };
11383                 genenvfn = fns[size][u];
11384                 break;
11385             }
11386             case 0xa: /* SRSHL, URSHL */
11387             {
11388                 static NeonGenTwoOpFn * const fns[3][2] = {
11389                     { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11390                     { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11391                     { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11392                 };
11393                 genfn = fns[size][u];
11394                 break;
11395             }
11396             case 0xb: /* SQRSHL, UQRSHL */
11397             {
11398                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11399                     { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11400                     { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11401                     { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11402                 };
11403                 genenvfn = fns[size][u];
11404                 break;
11405             }
11406             case 0xe: /* SABD, UABD */
11407             case 0xf: /* SABA, UABA */
11408             {
11409                 static NeonGenTwoOpFn * const fns[3][2] = {
11410                     { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
11411                     { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
11412                     { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
11413                 };
11414                 genfn = fns[size][u];
11415                 break;
11416             }
11417             case 0x16: /* SQDMULH, SQRDMULH */
11418             {
11419                 static NeonGenTwoOpEnvFn * const fns[2][2] = {
11420                     { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
11421                     { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
11422                 };
11423                 assert(size == 1 || size == 2);
11424                 genenvfn = fns[size - 1][u];
11425                 break;
11426             }
11427             default:
11428                 g_assert_not_reached();
11429             }
11430 
11431             if (genenvfn) {
11432                 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11433             } else {
11434                 genfn(tcg_res, tcg_op1, tcg_op2);
11435             }
11436 
11437             if (opcode == 0xf) {
11438                 /* SABA, UABA: accumulating ops */
11439                 static NeonGenTwoOpFn * const fns[3] = {
11440                     gen_helper_neon_add_u8,
11441                     gen_helper_neon_add_u16,
11442                     tcg_gen_add_i32,
11443                 };
11444 
11445                 read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
11446                 fns[size](tcg_res, tcg_op1, tcg_res);
11447             }
11448 
11449             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11450 
11451             tcg_temp_free_i32(tcg_res);
11452             tcg_temp_free_i32(tcg_op1);
11453             tcg_temp_free_i32(tcg_op2);
11454         }
11455     }
11456     clear_vec_high(s, is_q, rd);
11457 }
11458 
11459 /* AdvSIMD three same
11460  *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
11461  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11462  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
11463  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11464  */
11465 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11466 {
11467     int opcode = extract32(insn, 11, 5);
11468 
11469     switch (opcode) {
11470     case 0x3: /* logic ops */
11471         disas_simd_3same_logic(s, insn);
11472         break;
11473     case 0x17: /* ADDP */
11474     case 0x14: /* SMAXP, UMAXP */
11475     case 0x15: /* SMINP, UMINP */
11476     {
11477         /* Pairwise operations */
11478         int is_q = extract32(insn, 30, 1);
11479         int u = extract32(insn, 29, 1);
11480         int size = extract32(insn, 22, 2);
11481         int rm = extract32(insn, 16, 5);
11482         int rn = extract32(insn, 5, 5);
11483         int rd = extract32(insn, 0, 5);
11484         if (opcode == 0x17) {
11485             if (u || (size == 3 && !is_q)) {
11486                 unallocated_encoding(s);
11487                 return;
11488             }
11489         } else {
11490             if (size == 3) {
11491                 unallocated_encoding(s);
11492                 return;
11493             }
11494         }
11495         handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11496         break;
11497     }
11498     case 0x18 ... 0x31:
11499         /* floating point ops, sz[1] and U are part of opcode */
11500         disas_simd_3same_float(s, insn);
11501         break;
11502     default:
11503         disas_simd_3same_int(s, insn);
11504         break;
11505     }
11506 }
11507 
11508 /*
11509  * Advanced SIMD three same (ARMv8.2 FP16 variants)
11510  *
11511  *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
11512  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11513  * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
11514  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11515  *
11516  * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11517  * (register), FACGE, FABD, FCMGT (register) and FACGT.
11518  *
11519  */
11520 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11521 {
11522     int opcode, fpopcode;
11523     int is_q, u, a, rm, rn, rd;
11524     int datasize, elements;
11525     int pass;
11526     TCGv_ptr fpst;
11527     bool pairwise = false;
11528 
11529     if (!dc_isar_feature(aa64_fp16, s)) {
11530         unallocated_encoding(s);
11531         return;
11532     }
11533 
11534     if (!fp_access_check(s)) {
11535         return;
11536     }
11537 
11538     /* For these floating point ops, the U, a and opcode bits
11539      * together indicate the operation.
11540      */
11541     opcode = extract32(insn, 11, 3);
11542     u = extract32(insn, 29, 1);
11543     a = extract32(insn, 23, 1);
11544     is_q = extract32(insn, 30, 1);
11545     rm = extract32(insn, 16, 5);
11546     rn = extract32(insn, 5, 5);
11547     rd = extract32(insn, 0, 5);
11548 
11549     fpopcode = opcode | (a << 3) | (u << 4);
11550     datasize = is_q ? 128 : 64;
11551     elements = datasize / 16;
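    /* e.g. FADD (vector, fp16) has U=0, a=0, opcode 0b010, giving fpopcode
     * 0x2; with Q=1 it operates on 8 half-precision elements.
     */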
11552 
11553     switch (fpopcode) {
11554     case 0x10: /* FMAXNMP */
11555     case 0x12: /* FADDP */
11556     case 0x16: /* FMAXP */
11557     case 0x18: /* FMINNMP */
11558     case 0x1e: /* FMINP */
11559         pairwise = true;
11560         break;
11561     }
11562 
11563     fpst = get_fpstatus_ptr(true);
11564 
11565     if (pairwise) {
11566         int maxpass = is_q ? 8 : 4;
11567         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11568         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11569         TCGv_i32 tcg_res[8];
11570 
11571         for (pass = 0; pass < maxpass; pass++) {
11572             int passreg = pass < (maxpass / 2) ? rn : rm;
11573             int passelt = (pass << 1) & (maxpass - 1);
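            /* Each pass handles one adjacent element pair; the mask wraps
             * the pair index around when we cross from the rn passes to
             * the rm passes.
             */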
11574 
11575             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11576             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11577             tcg_res[pass] = tcg_temp_new_i32();
11578 
11579             switch (fpopcode) {
11580             case 0x10: /* FMAXNMP */
11581                 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11582                                            fpst);
11583                 break;
11584             case 0x12: /* FADDP */
11585                 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11586                 break;
11587             case 0x16: /* FMAXP */
11588                 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11589                 break;
11590             case 0x18: /* FMINNMP */
11591                 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11592                                            fpst);
11593                 break;
11594             case 0x1e: /* FMINP */
11595                 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11596                 break;
11597             default:
11598                 g_assert_not_reached();
11599             }
11600         }
11601 
11602         for (pass = 0; pass < maxpass; pass++) {
11603             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11604             tcg_temp_free_i32(tcg_res[pass]);
11605         }
11606 
11607         tcg_temp_free_i32(tcg_op1);
11608         tcg_temp_free_i32(tcg_op2);
11609 
11610     } else {
11611         for (pass = 0; pass < elements; pass++) {
11612             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11613             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11614             TCGv_i32 tcg_res = tcg_temp_new_i32();
11615 
11616             read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11617             read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11618 
11619             switch (fpopcode) {
11620             case 0x0: /* FMAXNM */
11621                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11622                 break;
11623             case 0x1: /* FMLA */
11624                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11625                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11626                                            fpst);
11627                 break;
11628             case 0x2: /* FADD */
11629                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11630                 break;
11631             case 0x3: /* FMULX */
11632                 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11633                 break;
11634             case 0x4: /* FCMEQ */
11635                 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11636                 break;
11637             case 0x6: /* FMAX */
11638                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11639                 break;
11640             case 0x7: /* FRECPS */
11641                 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11642                 break;
11643             case 0x8: /* FMINNM */
11644                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11645                 break;
11646             case 0x9: /* FMLS */
11647                 /* As usual for ARM, separate negation for fused multiply-add */
11648                 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11649                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11650                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11651                                            fpst);
11652                 break;
11653             case 0xa: /* FSUB */
11654                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11655                 break;
11656             case 0xe: /* FMIN */
11657                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11658                 break;
11659             case 0xf: /* FRSQRTS */
11660                 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11661                 break;
11662             case 0x13: /* FMUL */
11663                 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11664                 break;
11665             case 0x14: /* FCMGE */
11666                 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11667                 break;
11668             case 0x15: /* FACGE */
11669                 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11670                 break;
11671             case 0x17: /* FDIV */
11672                 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11673                 break;
11674             case 0x1a: /* FABD */
11675                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11676                 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11677                 break;
11678             case 0x1c: /* FCMGT */
11679                 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11680                 break;
11681             case 0x1d: /* FACGT */
11682                 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11683                 break;
11684             default:
11685                 fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
11686                         __func__, insn, fpopcode, s->pc_curr);
11687                 g_assert_not_reached();
11688             }
11689 
11690             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11691             tcg_temp_free_i32(tcg_res);
11692             tcg_temp_free_i32(tcg_op1);
11693             tcg_temp_free_i32(tcg_op2);
11694         }
11695     }
11696 
11697     tcg_temp_free_ptr(fpst);
11698 
11699     clear_vec_high(s, is_q, rd);
11700 }
11701 
11702 /* AdvSIMD three same extra
11703  *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
11704  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11705  * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
11706  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11707  */
11708 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11709 {
11710     int rd = extract32(insn, 0, 5);
11711     int rn = extract32(insn, 5, 5);
11712     int opcode = extract32(insn, 11, 4);
11713     int rm = extract32(insn, 16, 5);
11714     int size = extract32(insn, 22, 2);
11715     bool u = extract32(insn, 29, 1);
11716     bool is_q = extract32(insn, 30, 1);
11717     bool feature;
11718     int rot;
11719 
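    /* The U bit is folded into the decode below (u * 16 + opcode), so e.g.
     * SQRDMLAH (U=1, opcode 0) is case 0x10 here but case 0x0 in the
     * execution switch further down.
     */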
11720     switch (u * 16 + opcode) {
11721     case 0x10: /* SQRDMLAH (vector) */
11722     case 0x11: /* SQRDMLSH (vector) */
11723         if (size != 1 && size != 2) {
11724             unallocated_encoding(s);
11725             return;
11726         }
11727         feature = dc_isar_feature(aa64_rdm, s);
11728         break;
11729     case 0x02: /* SDOT (vector) */
11730     case 0x12: /* UDOT (vector) */
11731         if (size != MO_32) {
11732             unallocated_encoding(s);
11733             return;
11734         }
11735         feature = dc_isar_feature(aa64_dp, s);
11736         break;
11737     case 0x18: /* FCMLA, #0 */
11738     case 0x19: /* FCMLA, #90 */
11739     case 0x1a: /* FCMLA, #180 */
11740     case 0x1b: /* FCMLA, #270 */
11741     case 0x1c: /* FCADD, #90 */
11742     case 0x1e: /* FCADD, #270 */
11743         if (size == 0
11744             || (size == 1 && !dc_isar_feature(aa64_fp16, s))
11745             || (size == 3 && !is_q)) {
11746             unallocated_encoding(s);
11747             return;
11748         }
11749         feature = dc_isar_feature(aa64_fcma, s);
11750         break;
11751     default:
11752         unallocated_encoding(s);
11753         return;
11754     }
11755     if (!feature) {
11756         unallocated_encoding(s);
11757         return;
11758     }
11759     if (!fp_access_check(s)) {
11760         return;
11761     }
11762 
11763     switch (opcode) {
11764     case 0x0: /* SQRDMLAH (vector) */
11765         switch (size) {
11766         case 1:
11767             gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
11768             break;
11769         case 2:
11770             gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
11771             break;
11772         default:
11773             g_assert_not_reached();
11774         }
11775         return;
11776 
11777     case 0x1: /* SQRDMLSH (vector) */
11778         switch (size) {
11779         case 1:
11780             gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
11781             break;
11782         case 2:
11783             gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
11784             break;
11785         default:
11786             g_assert_not_reached();
11787         }
11788         return;
11789 
11790     case 0x2: /* SDOT / UDOT */
11791         gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
11792                          u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11793         return;
11794 
11795     case 0x8: /* FCMLA, #0 */
11796     case 0x9: /* FCMLA, #90 */
11797     case 0xa: /* FCMLA, #180 */
11798     case 0xb: /* FCMLA, #270 */
11799         rot = extract32(opcode, 0, 2);
11800         switch (size) {
11801         case 1:
11802             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
11803                               gen_helper_gvec_fcmlah);
11804             break;
11805         case 2:
11806             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11807                               gen_helper_gvec_fcmlas);
11808             break;
11809         case 3:
11810             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11811                               gen_helper_gvec_fcmlad);
11812             break;
11813         default:
11814             g_assert_not_reached();
11815         }
11816         return;
11817 
11818     case 0xc: /* FCADD, #90 */
11819     case 0xe: /* FCADD, #270 */
11820         rot = extract32(opcode, 1, 1);
11821         switch (size) {
11822         case 1:
11823             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11824                               gen_helper_gvec_fcaddh);
11825             break;
11826         case 2:
11827             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11828                               gen_helper_gvec_fcadds);
11829             break;
11830         case 3:
11831             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11832                               gen_helper_gvec_fcaddd);
11833             break;
11834         default:
11835             g_assert_not_reached();
11836         }
11837         return;
11838 
11839     default:
11840         g_assert_not_reached();
11841     }
11842 }
11843 
11844 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11845                                   int size, int rn, int rd)
11846 {
11847     /* Handle 2-reg-misc ops which are widening (so each size element
11848      * in the source becomes a 2*size element in the destination).
11849      * The only instruction like this is FCVTL.
11850      */
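    /* e.g. FCVTL Vd.2D, Vn.2S converts the low pair of singles to doubles;
     * FCVTL2 (Q=1) converts the high pair instead.
     */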
11851     int pass;
11852 
11853     if (size == 3) {
11854         /* 32 -> 64 bit fp conversion */
11855         TCGv_i64 tcg_res[2];
11856         int srcelt = is_q ? 2 : 0;
11857 
11858         for (pass = 0; pass < 2; pass++) {
11859             TCGv_i32 tcg_op = tcg_temp_new_i32();
11860             tcg_res[pass] = tcg_temp_new_i64();
11861 
11862             read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11863             gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11864             tcg_temp_free_i32(tcg_op);
11865         }
11866         for (pass = 0; pass < 2; pass++) {
11867             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11868             tcg_temp_free_i64(tcg_res[pass]);
11869         }
11870     } else {
11871         /* 16 -> 32 bit fp conversion */
11872         int srcelt = is_q ? 4 : 0;
11873         TCGv_i32 tcg_res[4];
11874         TCGv_ptr fpst = get_fpstatus_ptr(false);
11875         TCGv_i32 ahp = get_ahp_flag();
11876 
11877         for (pass = 0; pass < 4; pass++) {
11878             tcg_res[pass] = tcg_temp_new_i32();
11879 
11880             read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11881             gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11882                                            fpst, ahp);
11883         }
11884         for (pass = 0; pass < 4; pass++) {
11885             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11886             tcg_temp_free_i32(tcg_res[pass]);
11887         }
11888 
11889         tcg_temp_free_ptr(fpst);
11890         tcg_temp_free_i32(ahp);
11891     }
11892 }
11893 
11894 static void handle_rev(DisasContext *s, int opcode, bool u,
11895                        bool is_q, int size, int rn, int rd)
11896 {
11897     int op = (opcode << 1) | u;
11898     int opsz = op + size;
11899     int grp_size = 3 - opsz;
11900     int dsize = is_q ? 128 : 64;
11901     int i;
11902 
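    /* op selects the insn: 0 is REV64, 1 is REV32, 2 is REV16.  grp_size
     * is log2 of the number of elements in each reversed group, so
     * opsz >= 3 would mean groups of one element or fewer: unallocated.
     */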
11903     if (opsz >= 3) {
11904         unallocated_encoding(s);
11905         return;
11906     }
11907 
11908     if (!fp_access_check(s)) {
11909         return;
11910     }
11911 
11912     if (size == 0) {
11913         /* Special case bytes, use bswap op on each group of elements */
11914         int groups = dsize / (8 << grp_size);
11915 
11916         for (i = 0; i < groups; i++) {
11917             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11918 
11919             read_vec_element(s, tcg_tmp, rn, i, grp_size);
11920             switch (grp_size) {
11921             case MO_16:
11922                 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
11923                 break;
11924             case MO_32:
11925                 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
11926                 break;
11927             case MO_64:
11928                 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11929                 break;
11930             default:
11931                 g_assert_not_reached();
11932             }
11933             write_vec_element(s, tcg_tmp, rd, i, grp_size);
11934             tcg_temp_free_i64(tcg_tmp);
11935         }
11936         clear_vec_high(s, is_q, rd);
11937     } else {
11938         int revmask = (1 << grp_size) - 1;
11939         int esize = 8 << size;
11940         int elements = dsize / esize;
11941         TCGv_i64 tcg_rn = tcg_temp_new_i64();
11942         TCGv_i64 tcg_rd = tcg_const_i64(0);
11943         TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
11944 
11945         for (i = 0; i < elements; i++) {
11946             int e_rev = (i & 0xf) ^ revmask;
11947             int off = e_rev * esize;
11948             read_vec_element(s, tcg_rn, rn, i, size);
11949             if (off >= 64) {
11950                 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
11951                                     tcg_rn, off - 64, esize);
11952             } else {
11953                 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
11954             }
11955         }
11956         write_vec_element(s, tcg_rd, rd, 0, MO_64);
11957         write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
11958 
11959         tcg_temp_free_i64(tcg_rd_hi);
11960         tcg_temp_free_i64(tcg_rd);
11961         tcg_temp_free_i64(tcg_rn);
11962     }
11963 }
11964 
11965 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11966                                   bool is_q, int size, int rn, int rd)
11967 {
11968     /* Implement the pairwise operations from 2-misc:
11969      * SADDLP, UADDLP, SADALP, UADALP.
11970      * These all add pairs of elements in the input to produce a
11971      * double-width result element in the output (possibly accumulating).
11972      */
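    /* e.g. SADDLP Vd.4S, Vn.8H adds adjacent signed halfword pairs;
     * SADALP additionally accumulates the sums into Vd.
     */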
11973     bool accum = (opcode == 0x6);
11974     int maxpass = is_q ? 2 : 1;
11975     int pass;
11976     TCGv_i64 tcg_res[2];
11977 
11978     if (size == 2) {
11979         /* 32 + 32 -> 64 op */
11980         MemOp memop = size + (u ? 0 : MO_SIGN);
11981 
11982         for (pass = 0; pass < maxpass; pass++) {
11983             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11984             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11985 
11986             tcg_res[pass] = tcg_temp_new_i64();
11987 
11988             read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11989             read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11990             tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11991             if (accum) {
11992                 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11993                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11994             }
11995 
11996             tcg_temp_free_i64(tcg_op1);
11997             tcg_temp_free_i64(tcg_op2);
11998         }
11999     } else {
12000         for (pass = 0; pass < maxpass; pass++) {
12001             TCGv_i64 tcg_op = tcg_temp_new_i64();
12002             NeonGenOneOpFn *genfn;
12003             static NeonGenOneOpFn * const fns[2][2] = {
12004                 { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
12005                 { gen_helper_neon_addlp_s16,  gen_helper_neon_addlp_u16 },
12006             };
12007 
12008             genfn = fns[size][u];
12009 
12010             tcg_res[pass] = tcg_temp_new_i64();
12011 
12012             read_vec_element(s, tcg_op, rn, pass, MO_64);
12013             genfn(tcg_res[pass], tcg_op);
12014 
12015             if (accum) {
12016                 read_vec_element(s, tcg_op, rd, pass, MO_64);
12017                 if (size == 0) {
12018                     gen_helper_neon_addl_u16(tcg_res[pass],
12019                                              tcg_res[pass], tcg_op);
12020                 } else {
12021                     gen_helper_neon_addl_u32(tcg_res[pass],
12022                                              tcg_res[pass], tcg_op);
12023                 }
12024             }
12025             tcg_temp_free_i64(tcg_op);
12026         }
12027     }
12028     if (!is_q) {
12029         tcg_res[1] = tcg_const_i64(0);
12030     }
12031     for (pass = 0; pass < 2; pass++) {
12032         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12033         tcg_temp_free_i64(tcg_res[pass]);
12034     }
12035 }
12036 
12037 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12038 {
12039     /* Implement SHLL and SHLL2 */
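    /* The shift amount is fixed at the source element size, e.g.
     * SHLL Vd.8H, Vn.8B, #8 widens each byte and shifts left by 8.
     */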
12040     int pass;
12041     int part = is_q ? 2 : 0;
12042     TCGv_i64 tcg_res[2];
12043 
12044     for (pass = 0; pass < 2; pass++) {
12045         static NeonGenWidenFn * const widenfns[3] = {
12046             gen_helper_neon_widen_u8,
12047             gen_helper_neon_widen_u16,
12048             tcg_gen_extu_i32_i64,
12049         };
12050         NeonGenWidenFn *widenfn = widenfns[size];
12051         TCGv_i32 tcg_op = tcg_temp_new_i32();
12052 
12053         read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12054         tcg_res[pass] = tcg_temp_new_i64();
12055         widenfn(tcg_res[pass], tcg_op);
12056         tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12057 
12058         tcg_temp_free_i32(tcg_op);
12059     }
12060 
12061     for (pass = 0; pass < 2; pass++) {
12062         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12063         tcg_temp_free_i64(tcg_res[pass]);
12064     }
12065 }
12066 
12067 /* AdvSIMD two reg misc
12068  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
12069  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12070  * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12071  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12072  */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
12074 {
12075     int size = extract32(insn, 22, 2);
12076     int opcode = extract32(insn, 12, 5);
12077     bool u = extract32(insn, 29, 1);
12078     bool is_q = extract32(insn, 30, 1);
12079     int rn = extract32(insn, 5, 5);
12080     int rd = extract32(insn, 0, 5);
12081     bool need_fpstatus = false;
12082     bool need_rmode = false;
12083     int rmode = -1;
12084     TCGv_i32 tcg_rmode;
12085     TCGv_ptr tcg_fpstatus;
12086 
12087     switch (opcode) {
12088     case 0x0: /* REV64, REV32 */
12089     case 0x1: /* REV16 */
12090         handle_rev(s, opcode, u, is_q, size, rn, rd);
12091         return;
12092     case 0x5: /* CNT, NOT, RBIT */
12093         if (u && size == 0) {
12094             /* NOT */
12095             break;
12096         } else if (u && size == 1) {
12097             /* RBIT */
12098             break;
12099         } else if (!u && size == 0) {
12100             /* CNT */
12101             break;
12102         }
12103         unallocated_encoding(s);
12104         return;
12105     case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
12106     case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
12107         if (size == 3) {
12108             unallocated_encoding(s);
12109             return;
12110         }
12111         if (!fp_access_check(s)) {
12112             return;
12113         }
12114 
12115         handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
12116         return;
12117     case 0x4: /* CLS, CLZ */
12118         if (size == 3) {
12119             unallocated_encoding(s);
12120             return;
12121         }
12122         break;
12123     case 0x2: /* SADDLP, UADDLP */
12124     case 0x6: /* SADALP, UADALP */
12125         if (size == 3) {
12126             unallocated_encoding(s);
12127             return;
12128         }
12129         if (!fp_access_check(s)) {
12130             return;
12131         }
12132         handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12133         return;
12134     case 0x13: /* SHLL, SHLL2 */
12135         if (u == 0 || size == 3) {
12136             unallocated_encoding(s);
12137             return;
12138         }
12139         if (!fp_access_check(s)) {
12140             return;
12141         }
12142         handle_shll(s, is_q, size, rn, rd);
12143         return;
12144     case 0xa: /* CMLT */
12145         if (u == 1) {
12146             unallocated_encoding(s);
12147             return;
12148         }
12149         /* fall through */
12150     case 0x8: /* CMGT, CMGE */
12151     case 0x9: /* CMEQ, CMLE */
12152     case 0xb: /* ABS, NEG */
12153         if (size == 3 && !is_q) {
12154             unallocated_encoding(s);
12155             return;
12156         }
12157         break;
12158     case 0x3: /* SUQADD, USQADD */
12159         if (size == 3 && !is_q) {
12160             unallocated_encoding(s);
12161             return;
12162         }
12163         if (!fp_access_check(s)) {
12164             return;
12165         }
12166         handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12167         return;
12168     case 0x7: /* SQABS, SQNEG */
12169         if (size == 3 && !is_q) {
12170             unallocated_encoding(s);
12171             return;
12172         }
12173         break;
12174     case 0xc ... 0xf:
12175     case 0x16 ... 0x1f:
12176     {
12177         /* Floating point: U, size[1] and opcode indicate operation;
12178          * size[0] indicates single or double precision.
12179          */
12180         int is_double = extract32(size, 0, 1);
12181         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12182         size = is_double ? 3 : 2;
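        /*
         * The remapped opcode is a 7-bit value: bit 6 = U, bit 5 =
         * size[1], bits [4:0] = the original opcode field; hence the
         * case values below run up to 0x7f.
         */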
12183         switch (opcode) {
12184         case 0x2f: /* FABS */
12185         case 0x6f: /* FNEG */
12186             if (size == 3 && !is_q) {
12187                 unallocated_encoding(s);
12188                 return;
12189             }
12190             break;
12191         case 0x1d: /* SCVTF */
12192         case 0x5d: /* UCVTF */
12193         {
            bool is_signed = (opcode == 0x1d);
12195             int elements = is_double ? 2 : is_q ? 4 : 2;
12196             if (is_double && !is_q) {
12197                 unallocated_encoding(s);
12198                 return;
12199             }
12200             if (!fp_access_check(s)) {
12201                 return;
12202             }
12203             handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12204             return;
12205         }
12206         case 0x2c: /* FCMGT (zero) */
12207         case 0x2d: /* FCMEQ (zero) */
12208         case 0x2e: /* FCMLT (zero) */
12209         case 0x6c: /* FCMGE (zero) */
12210         case 0x6d: /* FCMLE (zero) */
12211             if (size == 3 && !is_q) {
12212                 unallocated_encoding(s);
12213                 return;
12214             }
12215             handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12216             return;
12217         case 0x7f: /* FSQRT */
12218             if (size == 3 && !is_q) {
12219                 unallocated_encoding(s);
12220                 return;
12221             }
12222             break;
12223         case 0x1a: /* FCVTNS */
12224         case 0x1b: /* FCVTMS */
12225         case 0x3a: /* FCVTPS */
12226         case 0x3b: /* FCVTZS */
12227         case 0x5a: /* FCVTNU */
12228         case 0x5b: /* FCVTMU */
12229         case 0x7a: /* FCVTPU */
12230         case 0x7b: /* FCVTZU */
12231             need_fpstatus = true;
12232             need_rmode = true;
12233             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
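            /*
             * This works because ARMFPRounding orders TIEEVEN=0,
             * POSINF=1, NEGINF=2, ZERO=3: e.g. FCVTNS (0x1a) -> 0,
             * FCVTPS (0x3a) -> 1, FCVTMS (0x1b) -> 2, FCVTZS (0x3b) -> 3.
             */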
12234             if (size == 3 && !is_q) {
12235                 unallocated_encoding(s);
12236                 return;
12237             }
12238             break;
12239         case 0x5c: /* FCVTAU */
12240         case 0x1c: /* FCVTAS */
12241             need_fpstatus = true;
12242             need_rmode = true;
12243             rmode = FPROUNDING_TIEAWAY;
12244             if (size == 3 && !is_q) {
12245                 unallocated_encoding(s);
12246                 return;
12247             }
12248             break;
12249         case 0x3c: /* URECPE */
12250             if (size == 3) {
12251                 unallocated_encoding(s);
12252                 return;
12253             }
12254             /* fall through */
12255         case 0x3d: /* FRECPE */
12256         case 0x7d: /* FRSQRTE */
12257             if (size == 3 && !is_q) {
12258                 unallocated_encoding(s);
12259                 return;
12260             }
12261             if (!fp_access_check(s)) {
12262                 return;
12263             }
12264             handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12265             return;
12266         case 0x56: /* FCVTXN, FCVTXN2 */
12267             if (size == 2) {
12268                 unallocated_encoding(s);
12269                 return;
12270             }
12271             /* fall through */
12272         case 0x16: /* FCVTN, FCVTN2 */
12273             /* handle_2misc_narrow does a 2*size -> size operation, but these
12274              * instructions encode the source size rather than dest size.
12275              */
12276             if (!fp_access_check(s)) {
12277                 return;
12278             }
12279             handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12280             return;
12281         case 0x17: /* FCVTL, FCVTL2 */
12282             if (!fp_access_check(s)) {
12283                 return;
12284             }
12285             handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12286             return;
12287         case 0x18: /* FRINTN */
12288         case 0x19: /* FRINTM */
12289         case 0x38: /* FRINTP */
12290         case 0x39: /* FRINTZ */
12291             need_rmode = true;
12292             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12293             /* fall through */
12294         case 0x59: /* FRINTX */
12295         case 0x79: /* FRINTI */
12296             need_fpstatus = true;
12297             if (size == 3 && !is_q) {
12298                 unallocated_encoding(s);
12299                 return;
12300             }
12301             break;
12302         case 0x58: /* FRINTA */
12303             need_rmode = true;
12304             rmode = FPROUNDING_TIEAWAY;
12305             need_fpstatus = true;
12306             if (size == 3 && !is_q) {
12307                 unallocated_encoding(s);
12308                 return;
12309             }
12310             break;
12311         case 0x7c: /* URSQRTE */
12312             if (size == 3) {
12313                 unallocated_encoding(s);
12314                 return;
12315             }
12316             need_fpstatus = true;
12317             break;
12318         case 0x1e: /* FRINT32Z */
12319         case 0x1f: /* FRINT64Z */
12320             need_rmode = true;
12321             rmode = FPROUNDING_ZERO;
12322             /* fall through */
12323         case 0x5e: /* FRINT32X */
12324         case 0x5f: /* FRINT64X */
12325             need_fpstatus = true;
12326             if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12327                 unallocated_encoding(s);
12328                 return;
12329             }
12330             break;
12331         default:
12332             unallocated_encoding(s);
12333             return;
12334         }
12335         break;
12336     }
12337     default:
12338         unallocated_encoding(s);
12339         return;
12340     }
12341 
12342     if (!fp_access_check(s)) {
12343         return;
12344     }
12345 
12346     if (need_fpstatus || need_rmode) {
12347         tcg_fpstatus = get_fpstatus_ptr(false);
12348     } else {
12349         tcg_fpstatus = NULL;
12350     }
12351     if (need_rmode) {
12352         tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12353         gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12354     } else {
12355         tcg_rmode = NULL;
12356     }
12357 
12358     switch (opcode) {
12359     case 0x5:
12360         if (u && size == 0) { /* NOT */
12361             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12362             return;
12363         }
12364         break;
12365     case 0xb:
12366         if (u) { /* ABS, NEG */
12367             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12368         } else {
12369             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12370         }
12371         return;
12372     }
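    /*
     * NOT, ABS and NEG were expanded above as whole-vector gvec
     * operations; everything else goes through the per-element
     * loops below.
     */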
12373 
12374     if (size == 3) {
12375         /* All 64-bit element operations can be shared with scalar 2misc */
12376         int pass;
12377 
12378         /* Coverity claims (size == 3 && !is_q) has been eliminated
12379          * from all paths leading to here.
12380          */
12381         tcg_debug_assert(is_q);
12382         for (pass = 0; pass < 2; pass++) {
12383             TCGv_i64 tcg_op = tcg_temp_new_i64();
12384             TCGv_i64 tcg_res = tcg_temp_new_i64();
12385 
12386             read_vec_element(s, tcg_op, rn, pass, MO_64);
12387 
12388             handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12389                             tcg_rmode, tcg_fpstatus);
12390 
12391             write_vec_element(s, tcg_res, rd, pass, MO_64);
12392 
12393             tcg_temp_free_i64(tcg_res);
12394             tcg_temp_free_i64(tcg_op);
12395         }
12396     } else {
12397         int pass;
12398 
12399         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12400             TCGv_i32 tcg_op = tcg_temp_new_i32();
12401             TCGv_i32 tcg_res = tcg_temp_new_i32();
12402             TCGCond cond;
12403 
12404             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12405 
12406             if (size == 2) {
12407                 /* Special cases for 32 bit elements */
12408                 switch (opcode) {
12409                 case 0xa: /* CMLT */
12410                     /* 32 bit integer comparison against zero, result is
12411                      * test ? (2^32 - 1) : 0. We implement via setcond(test)
                     * and negating, so the 0/1 setcond result becomes
                     * 0/-1 (all ones).
12413                      */
12414                     cond = TCG_COND_LT;
12415                 do_cmop:
12416                     tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
12417                     tcg_gen_neg_i32(tcg_res, tcg_res);
12418                     break;
12419                 case 0x8: /* CMGT, CMGE */
12420                     cond = u ? TCG_COND_GE : TCG_COND_GT;
12421                     goto do_cmop;
12422                 case 0x9: /* CMEQ, CMLE */
12423                     cond = u ? TCG_COND_LE : TCG_COND_EQ;
12424                     goto do_cmop;
                case 0x4: /* CLS, CLZ */
12426                     if (u) {
12427                         tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12428                     } else {
12429                         tcg_gen_clrsb_i32(tcg_res, tcg_op);
12430                     }
12431                     break;
12432                 case 0x7: /* SQABS, SQNEG */
12433                     if (u) {
12434                         gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12435                     } else {
12436                         gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12437                     }
12438                     break;
12439                 case 0x2f: /* FABS */
12440                     gen_helper_vfp_abss(tcg_res, tcg_op);
12441                     break;
12442                 case 0x6f: /* FNEG */
12443                     gen_helper_vfp_negs(tcg_res, tcg_op);
12444                     break;
12445                 case 0x7f: /* FSQRT */
12446                     gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12447                     break;
12448                 case 0x1a: /* FCVTNS */
12449                 case 0x1b: /* FCVTMS */
12450                 case 0x1c: /* FCVTAS */
12451                 case 0x3a: /* FCVTPS */
12452                 case 0x3b: /* FCVTZS */
12453                 {
12454                     TCGv_i32 tcg_shift = tcg_const_i32(0);
12455                     gen_helper_vfp_tosls(tcg_res, tcg_op,
12456                                          tcg_shift, tcg_fpstatus);
12457                     tcg_temp_free_i32(tcg_shift);
12458                     break;
12459                 }
12460                 case 0x5a: /* FCVTNU */
12461                 case 0x5b: /* FCVTMU */
12462                 case 0x5c: /* FCVTAU */
12463                 case 0x7a: /* FCVTPU */
12464                 case 0x7b: /* FCVTZU */
12465                 {
12466                     TCGv_i32 tcg_shift = tcg_const_i32(0);
12467                     gen_helper_vfp_touls(tcg_res, tcg_op,
12468                                          tcg_shift, tcg_fpstatus);
12469                     tcg_temp_free_i32(tcg_shift);
12470                     break;
12471                 }
12472                 case 0x18: /* FRINTN */
12473                 case 0x19: /* FRINTM */
12474                 case 0x38: /* FRINTP */
12475                 case 0x39: /* FRINTZ */
12476                 case 0x58: /* FRINTA */
12477                 case 0x79: /* FRINTI */
12478                     gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12479                     break;
12480                 case 0x59: /* FRINTX */
12481                     gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12482                     break;
12483                 case 0x7c: /* URSQRTE */
12484                     gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
12485                     break;
12486                 case 0x1e: /* FRINT32Z */
12487                 case 0x5e: /* FRINT32X */
12488                     gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12489                     break;
12490                 case 0x1f: /* FRINT64Z */
12491                 case 0x5f: /* FRINT64X */
12492                     gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12493                     break;
12494                 default:
12495                     g_assert_not_reached();
12496                 }
12497             } else {
12498                 /* Use helpers for 8 and 16 bit elements */
12499                 switch (opcode) {
12500                 case 0x5: /* CNT, RBIT */
12501                     /* For these two insns size is part of the opcode specifier
12502                      * (handled earlier); they always operate on byte elements.
12503                      */
12504                     if (u) {
12505                         gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12506                     } else {
12507                         gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12508                     }
12509                     break;
12510                 case 0x7: /* SQABS, SQNEG */
12511                 {
12512                     NeonGenOneOpEnvFn *genfn;
12513                     static NeonGenOneOpEnvFn * const fns[2][2] = {
12514                         { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12515                         { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12516                     };
12517                     genfn = fns[size][u];
12518                     genfn(tcg_res, cpu_env, tcg_op);
12519                     break;
12520                 }
12521                 case 0x8: /* CMGT, CMGE */
12522                 case 0x9: /* CMEQ, CMLE */
12523                 case 0xa: /* CMLT */
12524                 {
12525                     static NeonGenTwoOpFn * const fns[3][2] = {
12526                         { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
12527                         { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
12528                         { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
12529                     };
12530                     NeonGenTwoOpFn *genfn;
12531                     int comp;
12532                     bool reverse;
12533                     TCGv_i32 tcg_zero = tcg_const_i32(0);
12534 
12535                     /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
12536                     comp = (opcode - 0x8) * 2 + u;
12537                     /* ...but LE, LT are implemented as reverse GE, GT */
12538                     reverse = (comp > 2);
12539                     if (reverse) {
12540                         comp = 4 - comp;
12541                     }
12542                     genfn = fns[comp][size];
12543                     if (reverse) {
12544                         genfn(tcg_res, tcg_zero, tcg_op);
12545                     } else {
12546                         genfn(tcg_res, tcg_op, tcg_zero);
12547                     }
12548                     tcg_temp_free_i32(tcg_zero);
12549                     break;
12550                 }
12551                 case 0x4: /* CLS, CLZ */
12552                     if (u) {
12553                         if (size == 0) {
12554                             gen_helper_neon_clz_u8(tcg_res, tcg_op);
12555                         } else {
12556                             gen_helper_neon_clz_u16(tcg_res, tcg_op);
12557                         }
12558                     } else {
12559                         if (size == 0) {
12560                             gen_helper_neon_cls_s8(tcg_res, tcg_op);
12561                         } else {
12562                             gen_helper_neon_cls_s16(tcg_res, tcg_op);
12563                         }
12564                     }
12565                     break;
12566                 default:
12567                     g_assert_not_reached();
12568                 }
12569             }
12570 
12571             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12572 
12573             tcg_temp_free_i32(tcg_res);
12574             tcg_temp_free_i32(tcg_op);
12575         }
12576     }
12577     clear_vec_high(s, is_q, rd);
12578 
12579     if (need_rmode) {
12580         gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12581         tcg_temp_free_i32(tcg_rmode);
12582     }
12583     if (need_fpstatus) {
12584         tcg_temp_free_ptr(tcg_fpstatus);
12585     }
12586 }
12587 
12588 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12589  *
12590  *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
12591  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12592  * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12593  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12594  *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12595  *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12596  *
12597  * This actually covers two groups where scalar access is governed by
12598  * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are unallocated for the scalar decode. Also
12600  * in the scalar decode Q is always 1.
12601  */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12603 {
12604     int fpop, opcode, a, u;
12605     int rn, rd;
12606     bool is_q;
12607     bool is_scalar;
12608     bool only_in_vector = false;
12609 
12610     int pass;
12611     TCGv_i32 tcg_rmode = NULL;
12612     TCGv_ptr tcg_fpstatus = NULL;
12613     bool need_rmode = false;
12614     bool need_fpst = true;
12615     int rmode;
12616 
12617     if (!dc_isar_feature(aa64_fp16, s)) {
12618         unallocated_encoding(s);
12619         return;
12620     }
12621 
12622     rd = extract32(insn, 0, 5);
12623     rn = extract32(insn, 5, 5);
12624 
12625     a = extract32(insn, 23, 1);
12626     u = extract32(insn, 29, 1);
12627     is_scalar = extract32(insn, 28, 1);
12628     is_q = extract32(insn, 30, 1);
12629 
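    /*
     * As in the non-FP16 decoder, build a 7-bit opcode: bit 6 = U,
     * bit 5 = a (insn bit 23), bits [4:0] = the opcode field.
     */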
12630     opcode = extract32(insn, 12, 5);
12631     fpop = deposit32(opcode, 5, 1, a);
12632     fpop = deposit32(fpop, 6, 1, u);
12633 
12637     switch (fpop) {
12638     case 0x1d: /* SCVTF */
12639     case 0x5d: /* UCVTF */
12640     {
12641         int elements;
12642 
12643         if (is_scalar) {
12644             elements = 1;
12645         } else {
12646             elements = (is_q ? 8 : 4);
12647         }
12648 
12649         if (!fp_access_check(s)) {
12650             return;
12651         }
12652         handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12653         return;
12654     }
12656     case 0x2c: /* FCMGT (zero) */
12657     case 0x2d: /* FCMEQ (zero) */
12658     case 0x2e: /* FCMLT (zero) */
12659     case 0x6c: /* FCMGE (zero) */
12660     case 0x6d: /* FCMLE (zero) */
12661         handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12662         return;
12663     case 0x3d: /* FRECPE */
12664     case 0x3f: /* FRECPX */
12665         break;
12666     case 0x18: /* FRINTN */
12667         need_rmode = true;
12668         only_in_vector = true;
12669         rmode = FPROUNDING_TIEEVEN;
12670         break;
12671     case 0x19: /* FRINTM */
12672         need_rmode = true;
12673         only_in_vector = true;
12674         rmode = FPROUNDING_NEGINF;
12675         break;
12676     case 0x38: /* FRINTP */
12677         need_rmode = true;
12678         only_in_vector = true;
12679         rmode = FPROUNDING_POSINF;
12680         break;
12681     case 0x39: /* FRINTZ */
12682         need_rmode = true;
12683         only_in_vector = true;
12684         rmode = FPROUNDING_ZERO;
12685         break;
12686     case 0x58: /* FRINTA */
12687         need_rmode = true;
12688         only_in_vector = true;
12689         rmode = FPROUNDING_TIEAWAY;
12690         break;
12691     case 0x59: /* FRINTX */
12692     case 0x79: /* FRINTI */
12693         only_in_vector = true;
12694         /* current rounding mode */
12695         break;
12696     case 0x1a: /* FCVTNS */
12697         need_rmode = true;
12698         rmode = FPROUNDING_TIEEVEN;
12699         break;
12700     case 0x1b: /* FCVTMS */
12701         need_rmode = true;
12702         rmode = FPROUNDING_NEGINF;
12703         break;
12704     case 0x1c: /* FCVTAS */
12705         need_rmode = true;
12706         rmode = FPROUNDING_TIEAWAY;
12707         break;
12708     case 0x3a: /* FCVTPS */
12709         need_rmode = true;
12710         rmode = FPROUNDING_POSINF;
12711         break;
12712     case 0x3b: /* FCVTZS */
12713         need_rmode = true;
12714         rmode = FPROUNDING_ZERO;
12715         break;
12716     case 0x5a: /* FCVTNU */
12717         need_rmode = true;
12718         rmode = FPROUNDING_TIEEVEN;
12719         break;
12720     case 0x5b: /* FCVTMU */
12721         need_rmode = true;
12722         rmode = FPROUNDING_NEGINF;
12723         break;
12724     case 0x5c: /* FCVTAU */
12725         need_rmode = true;
12726         rmode = FPROUNDING_TIEAWAY;
12727         break;
12728     case 0x7a: /* FCVTPU */
12729         need_rmode = true;
12730         rmode = FPROUNDING_POSINF;
12731         break;
12732     case 0x7b: /* FCVTZU */
12733         need_rmode = true;
12734         rmode = FPROUNDING_ZERO;
12735         break;
12736     case 0x2f: /* FABS */
12737     case 0x6f: /* FNEG */
12738         need_fpst = false;
12739         break;
12740     case 0x7d: /* FRSQRTE */
12741     case 0x7f: /* FSQRT (vector) */
12742         break;
12743     default:
12744         fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
12745         g_assert_not_reached();
12746     }
12747 
12749     /* Check additional constraints for the scalar encoding */
12750     if (is_scalar) {
12751         if (!is_q) {
12752             unallocated_encoding(s);
12753             return;
12754         }
12755         /* FRINTxx is only in the vector form */
12756         if (only_in_vector) {
12757             unallocated_encoding(s);
12758             return;
12759         }
12760     }
12761 
12762     if (!fp_access_check(s)) {
12763         return;
12764     }
12765 
12766     if (need_rmode || need_fpst) {
12767         tcg_fpstatus = get_fpstatus_ptr(true);
12768     }
12769 
12770     if (need_rmode) {
12771         tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12772         gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12773     }
12774 
12775     if (is_scalar) {
12776         TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12777         TCGv_i32 tcg_res = tcg_temp_new_i32();
12778 
12779         switch (fpop) {
12780         case 0x1a: /* FCVTNS */
12781         case 0x1b: /* FCVTMS */
12782         case 0x1c: /* FCVTAS */
12783         case 0x3a: /* FCVTPS */
12784         case 0x3b: /* FCVTZS */
12785             gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12786             break;
12787         case 0x3d: /* FRECPE */
12788             gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12789             break;
12790         case 0x3f: /* FRECPX */
12791             gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12792             break;
12793         case 0x5a: /* FCVTNU */
12794         case 0x5b: /* FCVTMU */
12795         case 0x5c: /* FCVTAU */
12796         case 0x7a: /* FCVTPU */
12797         case 0x7b: /* FCVTZU */
12798             gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12799             break;
12800         case 0x6f: /* FNEG */
12801             tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12802             break;
12803         case 0x7d: /* FRSQRTE */
12804             gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12805             break;
12806         default:
12807             g_assert_not_reached();
12808         }
12809 
12810         /* limit any sign extension going on */
12811         tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12812         write_fp_sreg(s, rd, tcg_res);
12813 
12814         tcg_temp_free_i32(tcg_res);
12815         tcg_temp_free_i32(tcg_op);
12816     } else {
12817         for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12818             TCGv_i32 tcg_op = tcg_temp_new_i32();
12819             TCGv_i32 tcg_res = tcg_temp_new_i32();
12820 
12821             read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12822 
12823             switch (fpop) {
12824             case 0x1a: /* FCVTNS */
12825             case 0x1b: /* FCVTMS */
12826             case 0x1c: /* FCVTAS */
12827             case 0x3a: /* FCVTPS */
12828             case 0x3b: /* FCVTZS */
12829                 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12830                 break;
12831             case 0x3d: /* FRECPE */
12832                 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12833                 break;
12834             case 0x5a: /* FCVTNU */
12835             case 0x5b: /* FCVTMU */
12836             case 0x5c: /* FCVTAU */
12837             case 0x7a: /* FCVTPU */
12838             case 0x7b: /* FCVTZU */
12839                 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12840                 break;
12841             case 0x18: /* FRINTN */
12842             case 0x19: /* FRINTM */
12843             case 0x38: /* FRINTP */
12844             case 0x39: /* FRINTZ */
12845             case 0x58: /* FRINTA */
12846             case 0x79: /* FRINTI */
12847                 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12848                 break;
12849             case 0x59: /* FRINTX */
12850                 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12851                 break;
12852             case 0x2f: /* FABS */
12853                 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12854                 break;
12855             case 0x6f: /* FNEG */
12856                 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12857                 break;
12858             case 0x7d: /* FRSQRTE */
12859                 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12860                 break;
12861             case 0x7f: /* FSQRT */
12862                 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12863                 break;
12864             default:
12865                 g_assert_not_reached();
12866             }
12867 
12868             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12869 
12870             tcg_temp_free_i32(tcg_res);
12871             tcg_temp_free_i32(tcg_op);
12872         }
12873 
12874         clear_vec_high(s, is_q, rd);
12875     }
12876 
12877     if (tcg_rmode) {
12878         gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12879         tcg_temp_free_i32(tcg_rmode);
12880     }
12881 
12882     if (tcg_fpstatus) {
12883         tcg_temp_free_ptr(tcg_fpstatus);
12884     }
12885 }
12886 
12887 /* AdvSIMD scalar x indexed element
12888  *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12889  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12890  * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12891  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12892  * AdvSIMD vector x indexed element
12893  *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12894  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12895  * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12896  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12897  */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12899 {
12900     /* This encoding has two kinds of instruction:
12901      *  normal, where we perform elt x idxelt => elt for each
12902      *     element in the vector
12903      *  long, where we perform elt x idxelt and generate a result of
12904      *     double the width of the input element
     * The long ops have a 'part' specifier (i.e. they come in INSN, INSN2
     * pairs).
12906      */
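    /*
     * For example, MUL Vd.4S, Vn.4S, Vm.S[1] is a normal op, while
     * SMULL2 Vd.2D, Vn.4S, Vm.S[1] is the INSN2 form of a long op,
     * taking its inputs from the upper half of Vn.
     */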
12907     bool is_scalar = extract32(insn, 28, 1);
12908     bool is_q = extract32(insn, 30, 1);
12909     bool u = extract32(insn, 29, 1);
12910     int size = extract32(insn, 22, 2);
12911     int l = extract32(insn, 21, 1);
12912     int m = extract32(insn, 20, 1);
12913     /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12914     int rm = extract32(insn, 16, 4);
12915     int opcode = extract32(insn, 12, 4);
12916     int h = extract32(insn, 11, 1);
12917     int rn = extract32(insn, 5, 5);
12918     int rd = extract32(insn, 0, 5);
12919     bool is_long = false;
12920     int is_fp = 0;
12921     bool is_fp16 = false;
12922     int index;
12923     TCGv_ptr fpst;
12924 
12925     switch (16 * u + opcode) {
12926     case 0x08: /* MUL */
12927     case 0x10: /* MLA */
12928     case 0x14: /* MLS */
12929         if (is_scalar) {
12930             unallocated_encoding(s);
12931             return;
12932         }
12933         break;
12934     case 0x02: /* SMLAL, SMLAL2 */
12935     case 0x12: /* UMLAL, UMLAL2 */
12936     case 0x06: /* SMLSL, SMLSL2 */
12937     case 0x16: /* UMLSL, UMLSL2 */
12938     case 0x0a: /* SMULL, SMULL2 */
12939     case 0x1a: /* UMULL, UMULL2 */
12940         if (is_scalar) {
12941             unallocated_encoding(s);
12942             return;
12943         }
12944         is_long = true;
12945         break;
12946     case 0x03: /* SQDMLAL, SQDMLAL2 */
12947     case 0x07: /* SQDMLSL, SQDMLSL2 */
12948     case 0x0b: /* SQDMULL, SQDMULL2 */
12949         is_long = true;
12950         break;
12951     case 0x0c: /* SQDMULH */
12952     case 0x0d: /* SQRDMULH */
12953         break;
12954     case 0x01: /* FMLA */
12955     case 0x05: /* FMLS */
12956     case 0x09: /* FMUL */
12957     case 0x19: /* FMULX */
12958         is_fp = 1;
12959         break;
12960     case 0x1d: /* SQRDMLAH */
12961     case 0x1f: /* SQRDMLSH */
12962         if (!dc_isar_feature(aa64_rdm, s)) {
12963             unallocated_encoding(s);
12964             return;
12965         }
12966         break;
12967     case 0x0e: /* SDOT */
12968     case 0x1e: /* UDOT */
12969         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12970             unallocated_encoding(s);
12971             return;
12972         }
12973         break;
12974     case 0x11: /* FCMLA #0 */
12975     case 0x13: /* FCMLA #90 */
12976     case 0x15: /* FCMLA #180 */
12977     case 0x17: /* FCMLA #270 */
12978         if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12979             unallocated_encoding(s);
12980             return;
12981         }
12982         is_fp = 2;
12983         break;
12984     case 0x00: /* FMLAL */
12985     case 0x04: /* FMLSL */
12986     case 0x18: /* FMLAL2 */
12987     case 0x1c: /* FMLSL2 */
12988         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
12989             unallocated_encoding(s);
12990             return;
12991         }
12992         size = MO_16;
12993         /* is_fp, but we pass cpu_env not fp_status.  */
12994         break;
12995     default:
12996         unallocated_encoding(s);
12997         return;
12998     }
12999 
13000     switch (is_fp) {
13001     case 1: /* normal fp */
13002         /* convert insn encoded size to MemOp size */
13003         switch (size) {
13004         case 0: /* half-precision */
13005             size = MO_16;
13006             is_fp16 = true;
13007             break;
13008         case MO_32: /* single precision */
13009         case MO_64: /* double precision */
13010             break;
13011         default:
13012             unallocated_encoding(s);
13013             return;
13014         }
13015         break;
13016 
13017     case 2: /* complex fp */
13018         /* Each indexable element is a complex pair.  */
13019         size += 1;
13020         switch (size) {
13021         case MO_32:
13022             if (h && !is_q) {
13023                 unallocated_encoding(s);
13024                 return;
13025             }
13026             is_fp16 = true;
13027             break;
13028         case MO_64:
13029             break;
13030         default:
13031             unallocated_encoding(s);
13032             return;
13033         }
13034         break;
13035 
13036     default: /* integer */
13037         switch (size) {
13038         case MO_8:
13039         case MO_64:
13040             unallocated_encoding(s);
13041             return;
13042         }
13043         break;
13044     }
13045     if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13046         unallocated_encoding(s);
13047         return;
13048     }
13049 
13050     /* Given MemOp size, adjust register and indexing.  */
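    /*
     * The element index is assembled from H:L:M: narrower elements need
     * more index bits, while for 32- and 64-bit elements M instead
     * becomes the top bit of Rm.
     */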
13051     switch (size) {
13052     case MO_16:
13053         index = h << 2 | l << 1 | m;
13054         break;
13055     case MO_32:
13056         index = h << 1 | l;
13057         rm |= m << 4;
13058         break;
13059     case MO_64:
13060         if (l || !is_q) {
13061             unallocated_encoding(s);
13062             return;
13063         }
13064         index = h;
13065         rm |= m << 4;
13066         break;
13067     default:
13068         g_assert_not_reached();
13069     }
13070 
13071     if (!fp_access_check(s)) {
13072         return;
13073     }
13074 
13075     if (is_fp) {
13076         fpst = get_fpstatus_ptr(is_fp16);
13077     } else {
13078         fpst = NULL;
13079     }
13080 
13081     switch (16 * u + opcode) {
13082     case 0x0e: /* SDOT */
13083     case 0x1e: /* UDOT */
13084         gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
13085                          u ? gen_helper_gvec_udot_idx_b
13086                          : gen_helper_gvec_sdot_idx_b);
13087         return;
13088     case 0x11: /* FCMLA #0 */
13089     case 0x13: /* FCMLA #90 */
13090     case 0x15: /* FCMLA #180 */
13091     case 0x17: /* FCMLA #270 */
13092         {
13093             int rot = extract32(insn, 13, 2);
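            /*
             * The helper's data word packs the rotation (0, 90, 180 or
             * 270 degrees) in bits [1:0] with the element index above it.
             */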
13094             int data = (index << 2) | rot;
13095             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13096                                vec_full_reg_offset(s, rn),
13097                                vec_full_reg_offset(s, rm), fpst,
13098                                is_q ? 16 : 8, vec_full_reg_size(s), data,
13099                                size == MO_64
13100                                ? gen_helper_gvec_fcmlas_idx
13101                                : gen_helper_gvec_fcmlah_idx);
13102             tcg_temp_free_ptr(fpst);
13103         }
13104         return;
13105 
13106     case 0x00: /* FMLAL */
13107     case 0x04: /* FMLSL */
13108     case 0x18: /* FMLAL2 */
13109     case 0x1c: /* FMLSL2 */
13110         {
13111             int is_s = extract32(opcode, 2, 1);
13112             int is_2 = u;
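            /*
             * Pack the helper's data word: bit 0 selects FMLSL vs FMLAL,
             * bit 1 selects the '2' (upper half) form, and the element
             * index sits above them.
             */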
13113             int data = (index << 2) | (is_2 << 1) | is_s;
13114             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13115                                vec_full_reg_offset(s, rn),
13116                                vec_full_reg_offset(s, rm), cpu_env,
13117                                is_q ? 16 : 8, vec_full_reg_size(s),
13118                                data, gen_helper_gvec_fmlal_idx_a64);
13119         }
13120         return;
13121     }
13122 
13123     if (size == 3) {
13124         TCGv_i64 tcg_idx = tcg_temp_new_i64();
13125         int pass;
13126 
13127         assert(is_fp && is_q && !is_long);
13128 
13129         read_vec_element(s, tcg_idx, rm, index, MO_64);
13130 
13131         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13132             TCGv_i64 tcg_op = tcg_temp_new_i64();
13133             TCGv_i64 tcg_res = tcg_temp_new_i64();
13134 
13135             read_vec_element(s, tcg_op, rn, pass, MO_64);
13136 
13137             switch (16 * u + opcode) {
13138             case 0x05: /* FMLS */
13139                 /* As usual for ARM, separate negation for fused multiply-add */
13140                 gen_helper_vfp_negd(tcg_op, tcg_op);
13141                 /* fall through */
13142             case 0x01: /* FMLA */
13143                 read_vec_element(s, tcg_res, rd, pass, MO_64);
13144                 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13145                 break;
13146             case 0x09: /* FMUL */
13147                 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13148                 break;
13149             case 0x19: /* FMULX */
13150                 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13151                 break;
13152             default:
13153                 g_assert_not_reached();
13154             }
13155 
13156             write_vec_element(s, tcg_res, rd, pass, MO_64);
13157             tcg_temp_free_i64(tcg_op);
13158             tcg_temp_free_i64(tcg_res);
13159         }
13160 
13161         tcg_temp_free_i64(tcg_idx);
13162         clear_vec_high(s, !is_scalar, rd);
13163     } else if (!is_long) {
13164         /* 32 bit floating point, or 16 or 32 bit integer.
13165          * For the 16 bit scalar case we use the usual Neon helpers and
13166          * rely on the fact that 0 op 0 == 0 with no side effects.
13167          */
13168         TCGv_i32 tcg_idx = tcg_temp_new_i32();
13169         int pass, maxpasses;
13170 
13171         if (is_scalar) {
13172             maxpasses = 1;
13173         } else {
13174             maxpasses = is_q ? 4 : 2;
13175         }
13176 
13177         read_vec_element_i32(s, tcg_idx, rm, index, size);
13178 
13179         if (size == 1 && !is_scalar) {
13180             /* The simplest way to handle the 16x16 indexed ops is to duplicate
13181              * the index into both halves of the 32 bit tcg_idx and then use
13182              * the usual Neon helpers.
13183              */
13184             tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
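            /* i.e. copy bits [15:0] of tcg_idx into bits [31:16], so
             * both 16-bit lanes hold the selected element. */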
13185         }
13186 
13187         for (pass = 0; pass < maxpasses; pass++) {
13188             TCGv_i32 tcg_op = tcg_temp_new_i32();
13189             TCGv_i32 tcg_res = tcg_temp_new_i32();
13190 
13191             read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13192 
13193             switch (16 * u + opcode) {
13194             case 0x08: /* MUL */
13195             case 0x10: /* MLA */
13196             case 0x14: /* MLS */
13197             {
13198                 static NeonGenTwoOpFn * const fns[2][2] = {
13199                     { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13200                     { tcg_gen_add_i32, tcg_gen_sub_i32 },
13201                 };
13202                 NeonGenTwoOpFn *genfn;
13203                 bool is_sub = opcode == 0x4;
13204 
13205                 if (size == 1) {
13206                     gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13207                 } else {
13208                     tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13209                 }
13210                 if (opcode == 0x8) {
13211                     break;
13212                 }
13213                 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13214                 genfn = fns[size - 1][is_sub];
13215                 genfn(tcg_res, tcg_op, tcg_res);
13216                 break;
13217             }
13218             case 0x05: /* FMLS */
13219             case 0x01: /* FMLA */
13220                 read_vec_element_i32(s, tcg_res, rd, pass,
13221                                      is_scalar ? size : MO_32);
13222                 switch (size) {
13223                 case 1:
13224                     if (opcode == 0x5) {
13225                         /* As usual for ARM, separate negation for fused
13226                          * multiply-add */
13227                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13228                     }
13229                     if (is_scalar) {
13230                         gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13231                                                    tcg_res, fpst);
13232                     } else {
13233                         gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13234                                                     tcg_res, fpst);
13235                     }
13236                     break;
13237                 case 2:
13238                     if (opcode == 0x5) {
13239                         /* As usual for ARM, separate negation for
13240                          * fused multiply-add */
13241                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13242                     }
13243                     gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13244                                            tcg_res, fpst);
13245                     break;
13246                 default:
13247                     g_assert_not_reached();
13248                 }
13249                 break;
13250             case 0x09: /* FMUL */
13251                 switch (size) {
13252                 case 1:
13253                     if (is_scalar) {
13254                         gen_helper_advsimd_mulh(tcg_res, tcg_op,
13255                                                 tcg_idx, fpst);
13256                     } else {
13257                         gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13258                                                  tcg_idx, fpst);
13259                     }
13260                     break;
13261                 case 2:
13262                     gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13263                     break;
13264                 default:
13265                     g_assert_not_reached();
13266                 }
13267                 break;
13268             case 0x19: /* FMULX */
13269                 switch (size) {
13270                 case 1:
13271                     if (is_scalar) {
13272                         gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13273                                                  tcg_idx, fpst);
13274                     } else {
13275                         gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13276                                                   tcg_idx, fpst);
13277                     }
13278                     break;
13279                 case 2:
13280                     gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13281                     break;
13282                 default:
13283                     g_assert_not_reached();
13284                 }
13285                 break;
13286             case 0x0c: /* SQDMULH */
13287                 if (size == 1) {
13288                     gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13289                                                tcg_op, tcg_idx);
13290                 } else {
13291                     gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13292                                                tcg_op, tcg_idx);
13293                 }
13294                 break;
13295             case 0x0d: /* SQRDMULH */
13296                 if (size == 1) {
13297                     gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13298                                                 tcg_op, tcg_idx);
13299                 } else {
13300                     gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13301                                                 tcg_op, tcg_idx);
13302                 }
13303                 break;
13304             case 0x1d: /* SQRDMLAH */
13305                 read_vec_element_i32(s, tcg_res, rd, pass,
13306                                      is_scalar ? size : MO_32);
13307                 if (size == 1) {
13308                     gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13309                                                 tcg_op, tcg_idx, tcg_res);
13310                 } else {
13311                     gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13312                                                 tcg_op, tcg_idx, tcg_res);
13313                 }
13314                 break;
13315             case 0x1f: /* SQRDMLSH */
13316                 read_vec_element_i32(s, tcg_res, rd, pass,
13317                                      is_scalar ? size : MO_32);
13318                 if (size == 1) {
13319                     gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13320                                                 tcg_op, tcg_idx, tcg_res);
13321                 } else {
13322                     gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13323                                                 tcg_op, tcg_idx, tcg_res);
13324                 }
13325                 break;
13326             default:
13327                 g_assert_not_reached();
13328             }
13329 
13330             if (is_scalar) {
13331                 write_fp_sreg(s, rd, tcg_res);
13332             } else {
13333                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13334             }
13335 
13336             tcg_temp_free_i32(tcg_op);
13337             tcg_temp_free_i32(tcg_res);
13338         }
13339 
13340         tcg_temp_free_i32(tcg_idx);
13341         clear_vec_high(s, is_q, rd);
13342     } else {
13343         /* long ops: 16x16->32 or 32x32->64 */
13344         TCGv_i64 tcg_res[2];
13345         int pass;
13346         bool satop = extract32(opcode, 0, 1);
13347         MemOp memop = MO_32;
13348 
13349         if (satop || !u) {
13350             memop |= MO_SIGN;
13351         }
13352 
13353         if (size == 2) {
13354             TCGv_i64 tcg_idx = tcg_temp_new_i64();
13355 
13356             read_vec_element(s, tcg_idx, rm, index, memop);
13357 
13358             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13359                 TCGv_i64 tcg_op = tcg_temp_new_i64();
13360                 TCGv_i64 tcg_passres;
13361                 int passelt;
13362 
13363                 if (is_scalar) {
13364                     passelt = 0;
13365                 } else {
13366                     passelt = pass + (is_q * 2);
13367                 }
13368 
13369                 read_vec_element(s, tcg_op, rn, passelt, memop);
13370 
13371                 tcg_res[pass] = tcg_temp_new_i64();
13372 
13373                 if (opcode == 0xa || opcode == 0xb) {
13374                     /* Non-accumulating ops */
13375                     tcg_passres = tcg_res[pass];
13376                 } else {
13377                     tcg_passres = tcg_temp_new_i64();
13378                 }
13379 
13380                 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13381                 tcg_temp_free_i64(tcg_op);
13382 
13383                 if (satop) {
13384                     /* saturating, doubling */
13385                     gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13386                                                       tcg_passres, tcg_passres);
13387                 }
13388 
13389                 if (opcode == 0xa || opcode == 0xb) {
13390                     continue;
13391                 }
13392 
13393                 /* Accumulating op: handle accumulate step */
13394                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13395 
13396                 switch (opcode) {
13397                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13398                     tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13399                     break;
13400                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13401                     tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13402                     break;
13403                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13404                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
13405                     /* fall through */
13406                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13407                     gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13408                                                       tcg_res[pass],
13409                                                       tcg_passres);
13410                     break;
13411                 default:
13412                     g_assert_not_reached();
13413                 }
13414                 tcg_temp_free_i64(tcg_passres);
13415             }
13416             tcg_temp_free_i64(tcg_idx);
13417 
13418             clear_vec_high(s, !is_scalar, rd);
13419         } else {
13420             TCGv_i32 tcg_idx = tcg_temp_new_i32();
13421 
13422             assert(size == 1);
13423             read_vec_element_i32(s, tcg_idx, rm, index, size);
13424 
13425             if (!is_scalar) {
13426                 /* The simplest way to handle the 16x16 indexed ops is to
13427                  * duplicate the index into both halves of the 32 bit tcg_idx
13428                  * and then use the usual Neon helpers.
13429                  */
13430                 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13431             }
13432 
13433             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13434                 TCGv_i32 tcg_op = tcg_temp_new_i32();
13435                 TCGv_i64 tcg_passres;
13436 
13437                 if (is_scalar) {
13438                     read_vec_element_i32(s, tcg_op, rn, pass, size);
13439                 } else {
13440                     read_vec_element_i32(s, tcg_op, rn,
13441                                          pass + (is_q * 2), MO_32);
13442                 }
13443 
13444                 tcg_res[pass] = tcg_temp_new_i64();
13445 
13446                 if (opcode == 0xa || opcode == 0xb) {
13447                     /* Non-accumulating ops */
13448                     tcg_passres = tcg_res[pass];
13449                 } else {
13450                     tcg_passres = tcg_temp_new_i64();
13451                 }
13452 
13453                 if (memop & MO_SIGN) {
13454                     gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13455                 } else {
13456                     gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13457                 }
13458                 if (satop) {
13459                     gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13460                                                       tcg_passres, tcg_passres);
13461                 }
13462                 tcg_temp_free_i32(tcg_op);
13463 
13464                 if (opcode == 0xa || opcode == 0xb) {
13465                     continue;
13466                 }
13467 
13468                 /* Accumulating op: handle accumulate step */
13469                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13470 
13471                 switch (opcode) {
13472                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13473                     gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13474                                              tcg_passres);
13475                     break;
13476                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13477                     gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13478                                              tcg_passres);
13479                     break;
13480                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13481                     gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13482                     /* fall through */
13483                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13484                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13485                                                       tcg_res[pass],
13486                                                       tcg_passres);
13487                     break;
13488                 default:
13489                     g_assert_not_reached();
13490                 }
13491                 tcg_temp_free_i64(tcg_passres);
13492             }
13493             tcg_temp_free_i32(tcg_idx);
13494 
13495             if (is_scalar) {
13496                 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13497             }
13498         }
13499 
        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}

/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    TCGv_i32 tcg_decrypt;
    CryptoThreeOpIntFn *genfn;

    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_decrypt = tcg_const_i32(decrypt);

    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_i32(tcg_decrypt);
}
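
/*
 * Worked example (derived from the encoding diagram above): AESE V0.16B,
 * V1.16B has opcode = 0x4, Rn = 1, Rd = 0, giving insn = 0x4e284820, which
 * reaches this function via the data_proc_simd decode table below.
 */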

/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        genfn = NULL;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}
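
/*
 * Worked example (from the diagram above): SHA256H Q0, Q1, V2.4S has
 * opcode = 4, Rm = 2, Rn = 1, Rd = 0, giving insn = 0x5e024020.
 */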

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoTwoOpFn *genfn;
    bool feature;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11  10  9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            genfn = NULL;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
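        /*
         * RAX1: Vd.2D = Vn.2D ^ ROL(Vm.2D, #1), computed inline one
         * 64-bit lane per pass rather than via a helper.
         */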
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}

/* Crypto two-reg SHA512
 *  31                                     12  11  10  9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    bool feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

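        /*
         * EOR3: Vd = Vn ^ Vm ^ Va; BCAX: Vd = Vn ^ (Vm & ~Va).
         * Both are computed one 64-bit lane per pass.
         */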
        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

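        /*
         * SM3SS1 produces a single 32-bit result in element 3:
         * ROL(ROL(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7).  The rotate-rights
         * by 20 and 25 below are the equivalent left rotates by 12 and 7;
         * elements 0..2 of Vd are zeroed.
         */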
        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}

/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

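    /*
     * XAR: Vd.2D = ROR(Vn.2D ^ Vm.2D, #imm6), one 64-bit lane per pass.
     * For example (from the encoding diagram above), XAR V0.2D, V1.2D,
     * V2.2D, #3 encodes as 0xce820c20.
     */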
    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}

/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2   = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

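    /*
     * A single helper implements all four SM3TT instructions: opcode
     * selects between SM3TT1A/SM3TT1B/SM3TT2A/SM3TT2B and imm2 selects
     * the source element of Vm.
     */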
    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex, we use a table-based
 * approach for this part of the decode.
 */
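/*
 * A table entry matches when (insn & mask) == pattern, and the first
 * matching entry wins.  For example, 0xce820c20 (XAR V0.2D, V1.2D, V2.2D,
 * #3; see disas_crypto_xar above) ANDed with mask 0xffe00000 yields
 * pattern 0xce800000, selecting disas_crypto_xar.
 */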
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return false;  /* FIXME */
#else
    uint64_t addr = s->base.pc_first;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * We test this immediately after reading an insn, which means
     * that any normal page must be in the TLB.  The only exception
     * would be for executing from flash or device memory, which
     * does not retain the TLB entry.
     *
     * FIXME: Assume false for those, for now.  We could use
     * arm_cpu_get_phys_page_attrs_debug to re-read the page
     * table entry even for that case.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0);
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, known to be non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *   - branch target identifiers,
 *   - paciasp, pacibsp,
 *   - BRK insn
 *   - HLT insn
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    s->pc_curr = s->base.pc_next;
    insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next += 4;

    s->fp_access_checked = false;

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero.  */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                                   syn_btitrap(s->btype),
                                   default_exception_el(s));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0.  */
            tcg_debug_assert(s->btype == 0);
        }
    }

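    /*
     * Top-level dispatch is on insn[28:25].  For example, ADD X0, X1, #1
     * encodes as 0x91000420, whose bits [28:25] are 0b1000, so it is
     * handled by the "data processing - immediate" case below.
     */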
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2:
        if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 16 cases are handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}

static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
    dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
    dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
    dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
    dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
    dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
    dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);

    /* Bound the number of insns to execute to those left on the page.  */
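    /*
     * TARGET_PAGE_MASK is negative, so pc_first | TARGET_PAGE_MASK equals
     * (page offset - page size) as a signed value; negating it yields the
     * bytes remaining on the page.  E.g. with 4K pages and pc_first at page
     * offset 8, bound = (4096 - 8) / 4 = 1022 insns.
     */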
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1.  */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}

static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_a64_set_pc_im(dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        dc->base.pc_next += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    translator_loop_temp_check(&dc->base);
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* This is a special case because we don't want to just halt the CPU
             * if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4);

            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        }
    }
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};