/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
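
/*
 * Worked example (illustrative, not from the original source): B/BL hold
 * a signed 24-bit word offset relative to PC, which on ARM reads as the
 * insn address + 8.  For a branch at 0x1000 targeting 0x0ffc:
 *   (0x0ffc - 0x1000 - 8) >> 2 = -3, stored as 0xfffffd in bits [23:0].
 */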

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int rot = encode_imm(offset);

    if (rot >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only),
 * and r0-r1 when doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

static inline uint32_t rotl(uint32_t val, int n)
{
  /* Mask the right-shift count so that n == 0 does not shift by 32 (UB).  */
  return (val << n) | (val >> ((32 - n) & 31));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
static int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0) {
        return 0;
    }
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0) {
        return 32 - shift;
    }
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0) {
        return 2;
    }
    if ((rotl(imm, 4) & ~0xff) == 0) {
        return 4;
    }
    if ((rotl(imm, 6) & ~0xff) == 0) {
        return 6;
    }
    /* imm can't be encoded */
    return -1;
}
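
/*
 * Worked example (illustrative): imm = 0x3f0.  ctz32(0x3f0) = 4, already
 * even, and 0x3f0 >> 4 = 0x3f fits in 8 bits, so encode_imm returns
 * 32 - 4 = 28.  The caller then emits the operand2 field
 * rotl(0x3f0, 28) | (28 << 7) = 0x3f | 0xe00 = 0xe3f, i.e. imm8 = 0x3f
 * rotated right by 2 * 14 = 28, which the CPU decodes back to 0x3f0.
 */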

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
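
/*
 * Example (illustrative): v32 = 0xaa0000bb.  With i = 6 the loop masks
 * out the top byte, leaving 0x000000bb, which is_shimm32 accepts with
 * cmode = 0x0, imm8 = 0xbb.  The function returns 6, telling the caller
 * to MOVI 0xbb and then ORR in the byte 0xaa at bit position 6 * 4 = 24.
 */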

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as Thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
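
/*
 * Example of the two-insn path (illustrative): arg = 0x00ff00ff has 16
 * set bits, so tt0 = arg and opc = MOV.  sh1 = 0 extracts 0xff, leaving
 * tt1 = 0x00ff0000; sh2 = 16 extracts the second byte and tt2 = 0, so we
 * emit "mov rd, #0x000000ff" followed by "eor rd, rd, #0x00ff0000".
 */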

static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn, int flags)
{
    if (use_armv6_instructions) {
        if (flags & TCG_BSWAP_OS) {
            /* revsh */
            tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
            return;
        }

        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
        if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            /* uxth */
            tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
        }
        return;
    }

    if (flags == 0) {
        /*
         * For stores, no input or output extension:
         *                              rn  = xxAB
         * lsr tmp, rn, #8              tmp = 0xxA
         * and tmp, tmp, #0xff          tmp = 000A
         * orr rd, tmp, rn, lsl #8      rd  = xABA
         */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
        return;
    }

    /*
     * Byte swap, leaving the result at the top of the register.
     * We will then shift down, zero or sign-extending.
     */
    if (flags & TCG_BSWAP_IZ) {
        /*
         *                              rn  = 00AB
         * ror tmp, rn, #8              tmp = B00A
         * orr tmp, tmp, tmp, lsl #16   tmp = BA00
         */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP,
                        SHIFT_IMM_LSL(16));
    } else {
        /*
         *                              rn  = xxAB
         * and tmp, rn, #0xff00         tmp = 00A0
         * lsl tmp, tmp, #8             tmp = 0A00
         * orr tmp, tmp, rn, lsl #24    tmp = BA00
         */
        tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24));
    }
    tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP,
                    (flags & TCG_BSWAP_OS
                     ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
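
/*
 * Tracing the pre-v6 bswap32 sequence with rn = 0x11223344 (illustrative):
 *   eor tmp, rn, rn, ror #16    tmp = 0x22662266
 *   bic tmp, tmp, #0x00ff0000   tmp = 0x22002266
 *   mov rd, rn, ror #8          rd  = 0x44112233
 *   eor rd, rd, tmp, lsr #8     rd  = 0x44332211
 */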

static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
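
/*
 * For example (illustrative), a 64-bit signed LT comparison lowers to
 *   cmp  al, bl
 *   sbcs tmp, ah, bh
 * which leaves N and V describing the full 64-bit subtraction, so the
 * ordinary LT/GE condition codes apply directly.
 */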

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
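
/*
 * Example (illustrative): Q1 occupies D2:D3, so encoding Q1 as Vd places
 * 0b0010 in bits [15:12] with D (bit 22) clear; Q8 occupies D16:D17 and
 * sets D with 0b0000 in [15:12].  Hence the shift-left-by-one above.
 */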

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[8] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
    [MO_Q]  = helper_be_ldq_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_SL] = helper_be_ldul_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
    [MO_Q]  = helper_le_ldq_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_SL] = helper_le_ldul_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[4] = {
    [MO_8]   = helper_ret_stb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers; 4 and up go on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
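
/*
 * E.g. (illustrative) with argreg = 1 on entry, the pair is bumped to
 * r2:r3 per the EABI even/odd rule; with argreg = 5 it is bumped to the
 * 8-aligned stack slot at offset (6 - 4) * 4 = 8.
 */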

#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /*
     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
     */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
    } else {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
    }

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }
    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory operation,
     * which means it isn't worth checking for an immediate operand for BIC.
     */
1562    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
1563        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
1564
1565        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
1566        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
1567                        addrlo, TCG_REG_TMP, 0);
1568        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
1569    } else {
1570        if (a_bits) {
1571            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
1572                            (1 << a_bits) - 1);
1573        }
1574        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
1575                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1576        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
1577                        0, TCG_REG_R2, TCG_REG_TMP,
1578                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1579    }
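    /*
     * For reference, the two shapes emitted above, assuming 4k pages and
     * a 4-byte aligned access (a_bits = 2):
     * v7:       movw  tmp, #0xffc           @ ~(page mask | 0x3)
     *           bic   tmp, addrlo, tmp      @ keep page number + low bits
     *           cmp   r2, tmp               @ misalignment forces mismatch
     * pre-v7:   tst   addrlo, #0x3
     *           mov   tmp, addrlo, lsr #12
     *           cmpeq r2, tmp, lsl #12
     */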
1580
1581    if (TARGET_LONG_BITS == 64) {
1582        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
1583    }
1584
1585    return TCG_REG_R1;
1586}
1587
1588/* Record the context of a call to the out-of-line helper code for the slow
1589   path of a load or store, so that we can later generate the correct
1590   helper code.  */
1591static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1592                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
1593                                TCGReg addrhi, tcg_insn_unit *raddr,
1594                                tcg_insn_unit *label_ptr)
1595{
1596    TCGLabelQemuLdst *label = new_ldst_label(s);
1597
1598    label->is_ld = is_ld;
1599    label->oi = oi;
1600    label->datalo_reg = datalo;
1601    label->datahi_reg = datahi;
1602    label->addrlo_reg = addrlo;
1603    label->addrhi_reg = addrhi;
1604    label->raddr = tcg_splitwx_to_rx(raddr);
1605    label->label_ptr[0] = label_ptr;
1606}
1607
1608static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1609{
1610    TCGReg argreg, datalo, datahi;
1611    TCGMemOpIdx oi = lb->oi;
1612    MemOp opc = get_memop(oi);
1613    void *func;
1614
1615    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1616        return false;
1617    }
1618
1619    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
1620    if (TARGET_LONG_BITS == 64) {
1621        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1622    } else {
1623        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1624    }
1625    argreg = tcg_out_arg_imm32(s, argreg, oi);
1626    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1627
1628    /* For armv6 we can use the canonical unsigned helpers and minimize
1629       icache usage.  For pre-armv6, use the signed helpers since we do
1630       not have a single insn sign-extend.  */
1631    if (use_armv6_instructions) {
1632        func = qemu_ld_helpers[opc & MO_SIZE];
1633    } else {
1634        func = qemu_ld_helpers[opc & MO_SSIZE];
1635        if (opc & MO_SIGN) {
1636            opc = MO_UL;
1637        }
1638    }
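    /*
     * Example of the pre-armv6 path: a signed byte load calls the MO_SB
     * helper, which returns an already sign-extended value, and opc is
     * rewritten to MO_UL so the switch below emits a plain register move
     * rather than a two-insn lsl/asr sign extension.
     */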
1639    tcg_out_call(s, func);
1640
1641    datalo = lb->datalo_reg;
1642    datahi = lb->datahi_reg;
1643    switch (opc & MO_SSIZE) {
1644    case MO_SB:
1645        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
1646        break;
1647    case MO_SW:
1648        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
1649        break;
1650    default:
1651        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1652        break;
1653    case MO_Q:
1654        if (datalo != TCG_REG_R1) {
1655            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1656            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1657        } else if (datahi != TCG_REG_R0) {
1658            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1659            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1660        } else {
1661            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
1662            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1663            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
1664        }
1665        break;
1666    }
1667
1668    tcg_out_goto(s, COND_AL, lb->raddr);
1669    return true;
1670}
1671
1672static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1673{
1674    TCGReg argreg, datalo, datahi;
1675    TCGMemOpIdx oi = lb->oi;
1676    MemOp opc = get_memop(oi);
1677
1678    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1679        return false;
1680    }
1681
1682    argreg = TCG_REG_R0;
1683    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
1684    if (TARGET_LONG_BITS == 64) {
1685        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1686    } else {
1687        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1688    }
1689
1690    datalo = lb->datalo_reg;
1691    datahi = lb->datahi_reg;
1692    switch (opc & MO_SIZE) {
1693    case MO_8:
1694        argreg = tcg_out_arg_reg8(s, argreg, datalo);
1695        break;
1696    case MO_16:
1697        argreg = tcg_out_arg_reg16(s, argreg, datalo);
1698        break;
1699    case MO_32:
1700    default:
1701        argreg = tcg_out_arg_reg32(s, argreg, datalo);
1702        break;
1703    case MO_64:
1704        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
1705        break;
1706    }
1707
1708    argreg = tcg_out_arg_imm32(s, argreg, oi);
1709    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1710
1711    /* Tail-call to the helper, which will return to the fast path.  */
1712    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
1713    return true;
1714}
1715#endif /* SOFTMMU */
1716
1717static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
1718                                         TCGReg datalo, TCGReg datahi,
1719                                         TCGReg addrlo, TCGReg addend)
1720{
1721    /* Byte swapping is left to middle-end expansion. */
1722    tcg_debug_assert((opc & MO_BSWAP) == 0);
1723
1724    switch (opc & MO_SSIZE) {
1725    case MO_UB:
1726        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
1727        break;
1728    case MO_SB:
1729        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
1730        break;
1731    case MO_UW:
1732        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
1733        break;
1734    case MO_SW:
1735        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
1736        break;
1737    case MO_UL:
1738        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
1739        break;
1740    case MO_Q:
1741        /* Avoid ldrd for user-only emulation, to handle unaligned.  */
1742        if (USING_SOFTMMU && use_armv6_instructions
1743            && (datalo & 1) == 0 && datahi == datalo + 1) {
1744            tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
1745        } else if (datalo != addend) {
1746            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
1747            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
1748        } else {
1749            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
1750                            addend, addrlo, SHIFT_IMM_LSL(0));
1751            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
1752            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
1753        }
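        /*
         * Background for the test above: ldrd needs an even-numbered
         * first register with its pair in the next register, and can
         * fault on addresses that are not sufficiently aligned, which
         * user-only emulation cannot rule out.
         */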
1754        break;
1755    default:
1756        g_assert_not_reached();
1757    }
1758}
1759
1760static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
1761                                          TCGReg datalo, TCGReg datahi,
1762                                          TCGReg addrlo)
1763{
1764    /* Byte swapping is left to middle-end expansion. */
1765    tcg_debug_assert((opc & MO_BSWAP) == 0);
1766
1767    switch (opc & MO_SSIZE) {
1768    case MO_UB:
1769        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
1770        break;
1771    case MO_SB:
1772        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
1773        break;
1774    case MO_UW:
1775        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
1776        break;
1777    case MO_SW:
1778        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
1779        break;
1780    case MO_UL:
1781        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1782        break;
1783    case MO_Q:
1784        /* Avoid ldrd for user-only emulation, to handle unaligned.  */
1785        if (USING_SOFTMMU && use_armv6_instructions
1786            && (datalo & 1) == 0 && datahi == datalo + 1) {
1787            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
1788        } else if (datalo == addrlo) {
1789            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1790            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1791        } else {
1792            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1793            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1794        }
1795        break;
1796    default:
1797        g_assert_not_reached();
1798    }
1799}
1800
1801static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1802{
1803    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1804    TCGMemOpIdx oi;
1805    MemOp opc;
1806#ifdef CONFIG_SOFTMMU
1807    int mem_index;
1808    TCGReg addend;
1809    tcg_insn_unit *label_ptr;
1810#endif
1811
1812    datalo = *args++;
1813    datahi = (is64 ? *args++ : 0);
1814    addrlo = *args++;
1815    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1816    oi = *args++;
1817    opc = get_memop(oi);
1818
1819#ifdef CONFIG_SOFTMMU
1820    mem_index = get_mmuidx(oi);
1821    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
1822
1823    /* This is a conditional BL only to load a pointer within this opcode into
1824       LR for the slow path.  We will not be using the value for a tail call.  */
1825    label_ptr = s->code_ptr;
1826    tcg_out_bl(s, COND_NE, 0);
1827
1828    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
1829
1830    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
1831                        s->code_ptr, label_ptr);
1832#else /* !CONFIG_SOFTMMU */
1833    if (guest_base) {
1834        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
1835        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
1836    } else {
1837        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
1838    }
1839#endif
1840}
1841
1842static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
1843                                         TCGReg datalo, TCGReg datahi,
1844                                         TCGReg addrlo, TCGReg addend)
1845{
1846    /* Byte swapping is left to middle-end expansion. */
1847    tcg_debug_assert((opc & MO_BSWAP) == 0);
1848
1849    switch (opc & MO_SIZE) {
1850    case MO_8:
1851        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
1852        break;
1853    case MO_16:
1854        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
1855        break;
1856    case MO_32:
1857        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
1858        break;
1859    case MO_64:
1860        /* Avoid strd for user-only emulation, to handle unaligned.  */
1861        if (USING_SOFTMMU && use_armv6_instructions
1862            && (datalo & 1) == 0 && datahi == datalo + 1) {
1863            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
1864        } else {
1865            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
1866            tcg_out_st32_12(s, cond, datahi, addend, 4);
1867        }
1868        break;
1869    default:
1870        g_assert_not_reached();
1871    }
1872}
1873
1874static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
1875                                          TCGReg datalo, TCGReg datahi,
1876                                          TCGReg addrlo)
1877{
1878    /* Byte swapping is left to middle-end expansion. */
1879    tcg_debug_assert((opc & MO_BSWAP) == 0);
1880
1881    switch (opc & MO_SIZE) {
1882    case MO_8:
1883        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
1884        break;
1885    case MO_16:
1886        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
1887        break;
1888    case MO_32:
1889        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1890        break;
1891    case MO_64:
1892        /* Avoid strd for user-only emulation, to handle unaligned.  */
1893        if (USING_SOFTMMU && use_armv6_instructions
1894            && (datalo & 1) == 0 && datahi == datalo + 1) {
1895            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
1896        } else {
1897            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1898            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
1899        }
1900        break;
1901    default:
1902        g_assert_not_reached();
1903    }
1904}
1905
1906static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1907{
1908    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1909    TCGMemOpIdx oi;
1910    MemOp opc;
1911#ifdef CONFIG_SOFTMMU
1912    int mem_index;
1913    TCGReg addend;
1914    tcg_insn_unit *label_ptr;
1915#endif
1916
1917    datalo = *args++;
1918    datahi = (is64 ? *args++ : 0);
1919    addrlo = *args++;
1920    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1921    oi = *args++;
1922    opc = get_memop(oi);
1923
1924#ifdef CONFIG_SOFTMMU
1925    mem_index = get_mmuidx(oi);
1926    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
1927
1928    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
1929
1930    /* The conditional call must come last, as we're going to return here.  */
1931    label_ptr = s->code_ptr;
1932    tcg_out_bl(s, COND_NE, 0);
1933
1934    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
1935                        s->code_ptr, label_ptr);
1936#else /* !CONFIG_SOFTMMU */
1937    if (guest_base) {
1938        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
1939        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
1940                              datahi, addrlo, TCG_REG_TMP);
1941    } else {
1942        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
1943    }
1944#endif
1945}
1946
1947static void tcg_out_epilogue(TCGContext *s);
1948
1949static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1950                const TCGArg args[TCG_MAX_OP_ARGS],
1951                const int const_args[TCG_MAX_OP_ARGS])
1952{
1953    TCGArg a0, a1, a2, a3, a4, a5;
1954    int c;
1955
1956    switch (opc) {
1957    case INDEX_op_exit_tb:
1958        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
1959        tcg_out_epilogue(s);
1960        break;
1961    case INDEX_op_goto_tb:
1962        {
1963            /* Indirect jump method */
1964            intptr_t ptr, dif, dil;
1965            TCGReg base = TCG_REG_PC;
1966
1967            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
1968            ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
1969            dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
1970            dil = sextract32(dif, 0, 12);
1971            if (dif != dil) {
1972                /* The TB is close, but outside the 12 bits addressable by
1973                   the load.  We can extend this to 20 bits with a sub of a
1974                   shifted immediate from pc.  In the vastly unlikely event
1975                   the code requires more than 1MB, we'll use 2 insns and
1976                   be no worse off.  */
1977                base = TCG_REG_R0;
1978                tcg_out_movi32(s, COND_AL, base, ptr - dil);
1979            }
1980            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
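            /*
             * Illustrative result, one of two shapes:
             *     ldr   pc, [pc, #dil]         @ target within +-2KB
             * or, when dif does not fit in 12 signed bits:
             *     movw/movt r0, #(ptr - dil)   @ v7; longer chain earlier
             *     ldr   pc, [r0, #dil]
             */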
1981            set_jmp_reset_offset(s, args[0]);
1982        }
1983        break;
1984    case INDEX_op_goto_ptr:
1985        tcg_out_bx(s, COND_AL, args[0]);
1986        break;
1987    case INDEX_op_br:
1988        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
1989        break;
1990
1991    case INDEX_op_ld8u_i32:
1992        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1993        break;
1994    case INDEX_op_ld8s_i32:
1995        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1996        break;
1997    case INDEX_op_ld16u_i32:
1998        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1999        break;
2000    case INDEX_op_ld16s_i32:
2001        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2002        break;
2003    case INDEX_op_ld_i32:
2004        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2005        break;
2006    case INDEX_op_st8_i32:
2007        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2008        break;
2009    case INDEX_op_st16_i32:
2010        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2011        break;
2012    case INDEX_op_st_i32:
2013        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2014        break;
2015
2016    case INDEX_op_movcond_i32:
2017        /* Constraints mean that v2 is always in the same register as dest,
2018         * so we only need to do "if condition passed, move v1 to dest".
2019         */
2020        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2021                        args[1], args[2], const_args[2]);
2022        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
2023                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
2024        break;
2025    case INDEX_op_add_i32:
2026        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
2027                        args[0], args[1], args[2], const_args[2]);
2028        break;
2029    case INDEX_op_sub_i32:
2030        if (const_args[1]) {
2031            if (const_args[2]) {
2032                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
2033            } else {
2034                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
2035                               args[0], args[2], args[1], 1);
2036            }
2037        } else {
2038            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
2039                            args[0], args[1], args[2], const_args[2]);
2040        }
2041        break;
2042    case INDEX_op_and_i32:
2043        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
2044                        args[0], args[1], args[2], const_args[2]);
2045        break;
2046    case INDEX_op_andc_i32:
2047        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
2048                        args[0], args[1], args[2], const_args[2]);
2049        break;
2050    case INDEX_op_or_i32:
2051        c = ARITH_ORR;
2052        goto gen_arith;
2053    case INDEX_op_xor_i32:
2054        c = ARITH_EOR;
2055        /* Fall through.  */
2056    gen_arith:
2057        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
2058        break;
2059    case INDEX_op_add2_i32:
2060        a0 = args[0], a1 = args[1], a2 = args[2];
2061        a3 = args[3], a4 = args[4], a5 = args[5];
2062        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2063            a0 = TCG_REG_TMP;
2064        }
2065        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2066                        a0, a2, a4, const_args[4]);
2067        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2068                        a1, a3, a5, const_args[5]);
2069        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2070        break;
2071    case INDEX_op_sub2_i32:
2072        a0 = args[0], a1 = args[1], a2 = args[2];
2073        a3 = args[3], a4 = args[4], a5 = args[5];
2074        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2075            a0 = TCG_REG_TMP;
2076        }
2077        if (const_args[2]) {
2078            if (const_args[4]) {
2079                tcg_out_movi32(s, COND_AL, a0, a4);
2080                a4 = a0;
2081            }
2082            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2083        } else {
2084            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2085                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2086        }
2087        if (const_args[3]) {
2088            if (const_args[5]) {
2089                tcg_out_movi32(s, COND_AL, a1, a5);
2090                a5 = a1;
2091            }
2092            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2093        } else {
2094            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2095                            a1, a3, a5, const_args[5]);
2096        }
2097        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2098        break;
2099    case INDEX_op_neg_i32:
2100        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
2101        break;
2102    case INDEX_op_not_i32:
2103        tcg_out_dat_reg(s, COND_AL,
2104                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
2105        break;
2106    case INDEX_op_mul_i32:
2107        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
2108        break;
2109    case INDEX_op_mulu2_i32:
2110        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2111        break;
2112    case INDEX_op_muls2_i32:
2113        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2114        break;
2115    /* XXX: Perhaps args[2] & 0x1f is wrong */
2116    case INDEX_op_shl_i32:
2117        c = const_args[2] ?
2118                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
2119        goto gen_shift32;
2120    case INDEX_op_shr_i32:
2121        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
2122                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
2123        goto gen_shift32;
2124    case INDEX_op_sar_i32:
2125        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
2126                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
2127        goto gen_shift32;
2128    case INDEX_op_rotr_i32:
2129        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
2130                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
2131        /* Fall through.  */
2132    gen_shift32:
2133        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
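        /*
         * Why the (args[2] & 0x1f) tests above: in the ARM shifter
         * encoding, an immediate count of 0 for LSR/ASR means 32 (and
         * ROR #0 encodes RRX), so a zero shift must be emitted as
         * LSL #0, i.e. a plain move.
         */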
2134        break;
2135
2136    case INDEX_op_rotl_i32:
2137        if (const_args[2]) {
2138            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2139                            ((0x20 - args[2]) & 0x1f) ?
2140                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
2141                            SHIFT_IMM_LSL(0));
2142        } else {
2143            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
2144            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2145                            SHIFT_REG_ROR(TCG_REG_TMP));
2146        }
2147        break;
2148
2149    case INDEX_op_ctz_i32:
2150        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
2151        a1 = TCG_REG_TMP;
2152        goto do_clz;
2153
2154    case INDEX_op_clz_i32:
2155        a1 = args[1];
2156    do_clz:
2157        a0 = args[0];
2158        a2 = args[2];
2159        c = const_args[2];
2160        if (c && a2 == 32) {
2161            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
2162            break;
2163        }
2164        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
2165        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
2166        if (c || a0 != a2) {
2167            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
2168        }
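        /*
         * Illustrative expansion of ctz with a register fallback value:
         *     rbit  tmp, rn          @ ctz(x) == clz(bit-reversed x)
         *     cmp   tmp, #0
         *     clzne rd, tmp
         *     moveq rd, rfallback    @ result for a zero input
         */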
2169        break;
2170
2171    case INDEX_op_brcond_i32:
2172        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2173                       args[0], args[1], const_args[1]);
2174        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
2175                           arg_label(args[3]));
2176        break;
2177    case INDEX_op_setcond_i32:
2178        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2179                        args[1], args[2], const_args[2]);
2180        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
2181                        ARITH_MOV, args[0], 0, 1);
2182        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
2183                        ARITH_MOV, args[0], 0, 0);
2184        break;
2185
2186    case INDEX_op_brcond2_i32:
2187        c = tcg_out_cmp2(s, args, const_args);
2188        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2189        break;
2190    case INDEX_op_setcond2_i32:
2191        c = tcg_out_cmp2(s, args + 1, const_args + 1);
2192        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2193        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2194                        ARITH_MOV, args[0], 0, 0);
2195        break;
2196
2197    case INDEX_op_qemu_ld_i32:
2198        tcg_out_qemu_ld(s, args, 0);
2199        break;
2200    case INDEX_op_qemu_ld_i64:
2201        tcg_out_qemu_ld(s, args, 1);
2202        break;
2203    case INDEX_op_qemu_st_i32:
2204        tcg_out_qemu_st(s, args, 0);
2205        break;
2206    case INDEX_op_qemu_st_i64:
2207        tcg_out_qemu_st(s, args, 1);
2208        break;
2209
2210    case INDEX_op_bswap16_i32:
2211        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2212        break;
2213    case INDEX_op_bswap32_i32:
2214        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2215        break;
2216
2217    case INDEX_op_ext8s_i32:
2218        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
2219        break;
2220    case INDEX_op_ext16s_i32:
2221        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
2222        break;
2223    case INDEX_op_ext16u_i32:
2224        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
2225        break;
2226
2227    case INDEX_op_deposit_i32:
2228        tcg_out_deposit(s, COND_AL, args[0], args[2],
2229                        args[3], args[4], const_args[2]);
2230        break;
2231    case INDEX_op_extract_i32:
2232        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2233        break;
2234    case INDEX_op_sextract_i32:
2235        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2236        break;
2237    case INDEX_op_extract2_i32:
2238        /* ??? These optimizations vs zero should be generic.  */
2239        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
2240        if (const_args[1]) {
2241            if (const_args[2]) {
2242                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2243            } else {
2244                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2245                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2246            }
2247        } else if (const_args[2]) {
2248            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2249                            args[1], SHIFT_IMM_LSR(args[3]));
2250        } else {
2251            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2252            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2253                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2254            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2255                            args[1], SHIFT_IMM_LSR(args[3]));
2256        }
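        /*
         * The general case above computes, in two insns,
         * rd = (lo >> pos) | (hi << (32 - pos)):
         *     mov  tmp, hi, lsl #(32 - pos)
         *     orr  rd, tmp, lo, lsr #pos
         */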
2257        break;
2258
2259    case INDEX_op_div_i32:
2260        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2261        break;
2262    case INDEX_op_divu_i32:
2263        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2264        break;
2265
2266    case INDEX_op_mb:
2267        tcg_out_mb(s, args[0]);
2268        break;
2269
2270    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2271    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2272    default:
2273        tcg_abort();
2274    }
2275}
2276
2277static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2278{
2279    switch (op) {
2280    case INDEX_op_goto_ptr:
2281        return C_O0_I1(r);
2282
2283    case INDEX_op_ld8u_i32:
2284    case INDEX_op_ld8s_i32:
2285    case INDEX_op_ld16u_i32:
2286    case INDEX_op_ld16s_i32:
2287    case INDEX_op_ld_i32:
2288    case INDEX_op_neg_i32:
2289    case INDEX_op_not_i32:
2290    case INDEX_op_bswap16_i32:
2291    case INDEX_op_bswap32_i32:
2292    case INDEX_op_ext8s_i32:
2293    case INDEX_op_ext16s_i32:
2294    case INDEX_op_ext16u_i32:
2295    case INDEX_op_extract_i32:
2296    case INDEX_op_sextract_i32:
2297        return C_O1_I1(r, r);
2298
2299    case INDEX_op_st8_i32:
2300    case INDEX_op_st16_i32:
2301    case INDEX_op_st_i32:
2302        return C_O0_I2(r, r);
2303
2304    case INDEX_op_add_i32:
2305    case INDEX_op_sub_i32:
2306    case INDEX_op_setcond_i32:
2307        return C_O1_I2(r, r, rIN);
2308
2309    case INDEX_op_and_i32:
2310    case INDEX_op_andc_i32:
2311    case INDEX_op_clz_i32:
2312    case INDEX_op_ctz_i32:
2313        return C_O1_I2(r, r, rIK);
2314
2315    case INDEX_op_mul_i32:
2316    case INDEX_op_div_i32:
2317    case INDEX_op_divu_i32:
2318        return C_O1_I2(r, r, r);
2319
2320    case INDEX_op_mulu2_i32:
2321    case INDEX_op_muls2_i32:
2322        return C_O2_I2(r, r, r, r);
2323
2324    case INDEX_op_or_i32:
2325    case INDEX_op_xor_i32:
2326        return C_O1_I2(r, r, rI);
2327
2328    case INDEX_op_shl_i32:
2329    case INDEX_op_shr_i32:
2330    case INDEX_op_sar_i32:
2331    case INDEX_op_rotl_i32:
2332    case INDEX_op_rotr_i32:
2333        return C_O1_I2(r, r, ri);
2334
2335    case INDEX_op_brcond_i32:
2336        return C_O0_I2(r, rIN);
2337    case INDEX_op_deposit_i32:
2338        return C_O1_I2(r, 0, rZ);
2339    case INDEX_op_extract2_i32:
2340        return C_O1_I2(r, rZ, rZ);
2341    case INDEX_op_movcond_i32:
2342        return C_O1_I4(r, r, rIN, rIK, 0);
2343    case INDEX_op_add2_i32:
2344        return C_O2_I4(r, r, r, r, rIN, rIK);
2345    case INDEX_op_sub2_i32:
2346        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2347    case INDEX_op_brcond2_i32:
2348        return C_O0_I4(r, r, rI, rI);
2349    case INDEX_op_setcond2_i32:
2350        return C_O1_I4(r, r, r, rI, rI);
2351
2352    case INDEX_op_qemu_ld_i32:
2353        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
2354    case INDEX_op_qemu_ld_i64:
2355        return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
2356    case INDEX_op_qemu_st_i32:
2357        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
2358    case INDEX_op_qemu_st_i64:
2359        return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
2360
2361    case INDEX_op_st_vec:
2362        return C_O0_I2(w, r);
2363    case INDEX_op_ld_vec:
2364    case INDEX_op_dupm_vec:
2365        return C_O1_I1(w, r);
2366    case INDEX_op_dup_vec:
2367        return C_O1_I1(w, wr);
2368    case INDEX_op_abs_vec:
2369    case INDEX_op_neg_vec:
2370    case INDEX_op_not_vec:
2371    case INDEX_op_shli_vec:
2372    case INDEX_op_shri_vec:
2373    case INDEX_op_sari_vec:
2374        return C_O1_I1(w, w);
2375    case INDEX_op_dup2_vec:
2376    case INDEX_op_add_vec:
2377    case INDEX_op_mul_vec:
2378    case INDEX_op_smax_vec:
2379    case INDEX_op_smin_vec:
2380    case INDEX_op_ssadd_vec:
2381    case INDEX_op_sssub_vec:
2382    case INDEX_op_sub_vec:
2383    case INDEX_op_umax_vec:
2384    case INDEX_op_umin_vec:
2385    case INDEX_op_usadd_vec:
2386    case INDEX_op_ussub_vec:
2387    case INDEX_op_xor_vec:
2388    case INDEX_op_arm_sshl_vec:
2389    case INDEX_op_arm_ushl_vec:
2390        return C_O1_I2(w, w, w);
2391    case INDEX_op_arm_sli_vec:
2392        return C_O1_I2(w, 0, w);
2393    case INDEX_op_or_vec:
2394    case INDEX_op_andc_vec:
2395        return C_O1_I2(w, w, wO);
2396    case INDEX_op_and_vec:
2397    case INDEX_op_orc_vec:
2398        return C_O1_I2(w, w, wV);
2399    case INDEX_op_cmp_vec:
2400        return C_O1_I2(w, w, wZ);
2401    case INDEX_op_bitsel_vec:
2402        return C_O1_I3(w, w, w, w);
2403    default:
2404        g_assert_not_reached();
2405    }
2406}
2407
2408static void tcg_target_init(TCGContext *s)
2409{
2410    /*
2411     * Only probe for the platform and capabilities if we haven't already
2412     * determined maximum values at compile time.
2413     */
2414#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2415    {
2416        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2417#ifndef use_idiv_instructions
2418        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2419#endif
2420#ifndef use_neon_instructions
2421        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2422#endif
2423    }
2424#endif
2425
2426    if (__ARM_ARCH < 7) {
2427        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2428        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2429            arm_arch = pl[1] - '0';
2430        }
2431    }
2432
2433    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2434
2435    tcg_target_call_clobber_regs = 0;
2436    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2437    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2438    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2439    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2440    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2441    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2442
2443    if (use_neon_instructions) {
2444        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2445        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2446
2447        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2448        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2449        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2450        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2451        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2452        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2453        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2454        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2455        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2456        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2457        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2458        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2459    }
2460
2461    s->reserved_regs = 0;
2462    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2463    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2464    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2465    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2466}
2467
2468static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2469                       TCGReg arg1, intptr_t arg2)
2470{
2471    switch (type) {
2472    case TCG_TYPE_I32:
2473        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2474        return;
2475    case TCG_TYPE_V64:
2476        /* regs 1; size 8; align 8 */
2477        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2478        return;
2479    case TCG_TYPE_V128:
2480        /* regs 2; size 8; align 16 */
2481        tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2);
2482        return;
2483    default:
2484        g_assert_not_reached();
2485    }
2486}
2487
2488static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2489                       TCGReg arg1, intptr_t arg2)
2490{
2491    switch (type) {
2492    case TCG_TYPE_I32:
2493        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2494        return;
2495    case TCG_TYPE_V64:
2496        /* regs 1; size 8; align 8 */
2497        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2498        return;
2499    case TCG_TYPE_V128:
2500        /* regs 2; size 8; align 16 */
2501        tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2);
2502        return;
2503    default:
2504        g_assert_not_reached();
2505    }
2506}
2507
2508static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2509                               TCGReg base, intptr_t ofs)
2510{
2511    return false;
2512}
2513
2514static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2515{
2516    if (ret == arg) {
2517        return true;
2518    }
2519    switch (type) {
2520    case TCG_TYPE_I32:
2521        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2522            tcg_out_mov_reg(s, COND_AL, ret, arg);
2523            return true;
2524        }
2525        return false;
2526
2527    case TCG_TYPE_V64:
2528    case TCG_TYPE_V128:
2529        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2530        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2531        return true;
2532
2533    default:
2534        g_assert_not_reached();
2535    }
2536}
2537
2538static void tcg_out_movi(TCGContext *s, TCGType type,
2539                         TCGReg ret, tcg_target_long arg)
2540{
2541    tcg_debug_assert(type == TCG_TYPE_I32);
2542    tcg_debug_assert(ret < TCG_REG_Q0);
2543    tcg_out_movi32(s, COND_AL, ret, arg);
2544}
2545
2546/* Type is always V128, with I64 elements.  */
2547static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2548{
2549    /* Move high element into place first. */
2550    /* VMOV Dd+1, Ds */
2551    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2552    /* Move low element into place; tcg_out_mov will check for nop. */
2553    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2554}
2555
2556static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2557                            TCGReg rd, TCGReg rs)
2558{
2559    int q = type - TCG_TYPE_V64;
2560
2561    if (vece == MO_64) {
2562        if (type == TCG_TYPE_V128) {
2563            tcg_out_dup2_vec(s, rd, rs, rs);
2564        } else {
2565            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2566        }
2567    } else if (rs < TCG_REG_Q0) {
2568        int b = (vece == MO_8);
2569        int e = (vece == MO_16);
2570        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2571                  encode_vn(rd) | (rs << 12));
2572    } else {
2573        int imm4 = 1 << vece;
2574        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2575                  encode_vd(rd) | encode_vm(rs));
2576    }
2577    return true;
2578}
2579
2580static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2581                             TCGReg rd, TCGReg base, intptr_t offset)
2582{
2583    if (vece == MO_64) {
2584        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2585        if (type == TCG_TYPE_V128) {
2586            tcg_out_dup2_vec(s, rd, rd, rd);
2587        }
2588    } else {
2589        int q = type - TCG_TYPE_V64;
2590        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2591                      rd, base, offset);
2592    }
2593    return true;
2594}
2595
2596static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2597                             TCGReg rd, int64_t v64)
2598{
2599    int q = type - TCG_TYPE_V64;
2600    int cmode, imm8, i;
2601
2602    /* Test all bytes equal first.  */
2603    if (vece == MO_8) {
2604        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2605        return;
2606    }
2607
2608    /*
2609     * Test all bytes 0x00 or 0xff second.  This can match cases that
2610     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2611     */
2612    for (i = imm8 = 0; i < 8; i++) {
2613        uint8_t byte = v64 >> (i * 8);
2614        if (byte == 0xff) {
2615            imm8 |= 1 << i;
2616        } else if (byte != 0) {
2617            goto fail_bytes;
2618        }
2619    }
2620    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2621    return;
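    /*
     * Worked example of the 0x00/0xff test: 0x00ff00ff00ff00ff yields
     * imm8 = 0x55 (one bit per 0xff byte) and is materialized with a
     * single VMOV.I64 of that byte mask.
     */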
2622 fail_bytes:
2623
2624    /*
2625     * Tests for various replications.  For each element width, if we
2626     * cannot find an expansion there's no point checking a larger
2627     * width because we already know by replication it cannot match.
2628     */
2629    if (vece == MO_16) {
2630        uint16_t v16 = v64;
2631
2632        if (is_shimm16(v16, &cmode, &imm8)) {
2633            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2634            return;
2635        }
2636        if (is_shimm16(~v16, &cmode, &imm8)) {
2637            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2638            return;
2639        }
2640
2641        /*
2642         * Otherwise, all remaining constants can be loaded in two insns:
2643         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2644         */
2645        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2646        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2647        return;
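        /*
         * E.g. v16 = 0x1234 becomes VMOV.I16 rd, #0x34 followed by
         * VORR.I16 rd, #0x1200 (cmode 0xb places imm8 in bits [15:8]).
         */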
2648    }
2649
2650    if (vece == MO_32) {
2651        uint32_t v32 = v64;
2652
2653        if (is_shimm32(v32, &cmode, &imm8) ||
2654            is_soimm32(v32, &cmode, &imm8)) {
2655            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2656            return;
2657        }
2658        if (is_shimm32(~v32, &cmode, &imm8) ||
2659            is_soimm32(~v32, &cmode, &imm8)) {
2660            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2661            return;
2662        }
2663
2664        /*
2665         * Restrict the set of constants to those we can load with
2666         * two instructions.  Others we load from the pool.
2667         */
2668        i = is_shimm32_pair(v32, &cmode, &imm8);
2669        if (i) {
2670            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2671            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2672            return;
2673        }
2674        i = is_shimm32_pair(~v32, &cmode, &imm8);
2675        if (i) {
2676            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2677            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2678            return;
2679        }
2680    }
2681
2682    /*
2683     * As a last resort, load from the constant pool.
2684     */
2685    if (!q || vece == MO_64) {
2686        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2687        /* VLDR Dd, [pc + offset] */
2688        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2689        if (q) {
2690            tcg_out_dup2_vec(s, rd, rd, rd);
2691        }
2692    } else {
2693        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2694        /* add tmp, pc, offset */
2695        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2696        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2697    }
2698}
2699
2700static const ARMInsn vec_cmp_insn[16] = {
2701    [TCG_COND_EQ] = INSN_VCEQ,
2702    [TCG_COND_GT] = INSN_VCGT,
2703    [TCG_COND_GE] = INSN_VCGE,
2704    [TCG_COND_GTU] = INSN_VCGT_U,
2705    [TCG_COND_GEU] = INSN_VCGE_U,
2706};
2707
2708static const ARMInsn vec_cmp0_insn[16] = {
2709    [TCG_COND_EQ] = INSN_VCEQ0,
2710    [TCG_COND_GT] = INSN_VCGT0,
2711    [TCG_COND_GE] = INSN_VCGE0,
2712    [TCG_COND_LT] = INSN_VCLT0,
2713    [TCG_COND_LE] = INSN_VCLE0,
2714};
2715
2716static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2717                           unsigned vecl, unsigned vece,
2718                           const TCGArg *args, const int *const_args)
2719{
2720    TCGType type = vecl + TCG_TYPE_V64;
2721    unsigned q = vecl;
2722    TCGArg a0, a1, a2, a3;
2723    int cmode, imm8;
2724
2725    a0 = args[0];
2726    a1 = args[1];
2727    a2 = args[2];
2728
2729    switch (opc) {
2730    case INDEX_op_ld_vec:
2731        tcg_out_ld(s, type, a0, a1, a2);
2732        return;
2733    case INDEX_op_st_vec:
2734        tcg_out_st(s, type, a0, a1, a2);
2735        return;
2736    case INDEX_op_dupm_vec:
2737        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2738        return;
2739    case INDEX_op_dup2_vec:
2740        tcg_out_dup2_vec(s, a0, a1, a2);
2741        return;
2742    case INDEX_op_abs_vec:
2743        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2744        return;
2745    case INDEX_op_neg_vec:
2746        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2747        return;
2748    case INDEX_op_not_vec:
2749        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2750        return;
2751    case INDEX_op_add_vec:
2752        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2753        return;
2754    case INDEX_op_mul_vec:
2755        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2756        return;
2757    case INDEX_op_smax_vec:
2758        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2759        return;
2760    case INDEX_op_smin_vec:
2761        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2762        return;
2763    case INDEX_op_sub_vec:
2764        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2765        return;
2766    case INDEX_op_ssadd_vec:
2767        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2768        return;
2769    case INDEX_op_sssub_vec:
2770        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2771        return;
2772    case INDEX_op_umax_vec:
2773        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2774        return;
2775    case INDEX_op_umin_vec:
2776        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2777        return;
2778    case INDEX_op_usadd_vec:
2779        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2780        return;
2781    case INDEX_op_ussub_vec:
2782        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2783        return;
2784    case INDEX_op_xor_vec:
2785        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2786        return;
2787    case INDEX_op_arm_sshl_vec:
2788        /*
2789         * Note that Vm is the data and Vn is the shift count,
2790         * therefore the arguments appear reversed.
2791         */
2792        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2793        return;
2794    case INDEX_op_arm_ushl_vec:
2795        /* See above. */
2796        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2797        return;
2798    case INDEX_op_shli_vec:
2799        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2800        return;
2801    case INDEX_op_shri_vec:
2802        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2803        return;
2804    case INDEX_op_sari_vec:
2805        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2806        return;
2807    case INDEX_op_arm_sli_vec:
2808        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2809        return;
2810
2811    case INDEX_op_andc_vec:
2812        if (!const_args[2]) {
2813            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2814            return;
2815        }
2816        a2 = ~a2;
2817        /* fall through */
2818    case INDEX_op_and_vec:
2819        if (const_args[2]) {
2820            is_shimm1632(~a2, &cmode, &imm8);
2821            if (a0 == a1) {
2822                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2823                return;
2824            }
2825            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2826            a2 = a0;
2827        }
2828        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2829        return;
2830
2831    case INDEX_op_orc_vec:
2832        if (!const_args[2]) {
2833            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2834            return;
2835        }
2836        a2 = ~a2;
2837        /* fall through */
2838    case INDEX_op_or_vec:
2839        if (const_args[2]) {
2840            is_shimm1632(a2, &cmode, &imm8);
2841            if (a0 == a1) {
2842                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2843                return;
2844            }
2845            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2846            a2 = a0;
2847        }
2848        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2849        return;
2850
2851    case INDEX_op_cmp_vec:
2852        {
2853            TCGCond cond = args[3];
2854
2855            if (cond == TCG_COND_NE) {
2856                if (const_args[2]) {
2857                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2858                } else {
2859                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2860                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2861                }
2862            } else {
2863                ARMInsn insn;
2864
2865                if (const_args[2]) {
2866                    insn = vec_cmp0_insn[cond];
2867                    if (insn) {
2868                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2869                        return;
2870                    }
2871                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2872                    a2 = TCG_VEC_TMP;
2873                }
2874                insn = vec_cmp_insn[cond];
2875                if (insn == 0) {
2876                    TCGArg t;
2877                    t = a1, a1 = a2, a2 = t;
2878                    cond = tcg_swap_cond(cond);
2879                    insn = vec_cmp_insn[cond];
2880                    tcg_debug_assert(insn != 0);
2881                }
2882                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2883            }
2884        }
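        /*
         * Note on the swap above: NEON provides only the EQ/GT/GE
         * comparisons (plus unsigned forms), so e.g. TCG_COND_LT is
         * handled by swapping the operands and testing GT instead,
         * with tcg_swap_cond supplying the rewritten condition.
         */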
2885        return;
2886
2887    case INDEX_op_bitsel_vec:
2888        a3 = args[3];
2889        if (a0 == a3) {
2890            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2891        } else if (a0 == a2) {
2892            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2893        } else {
2894            tcg_out_mov(s, type, a0, a1);
2895            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2896        }
2897        return;
2898
2899    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2900    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2901    default:
2902        g_assert_not_reached();
2903    }
2904}
2905
2906int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2907{
2908    switch (opc) {
2909    case INDEX_op_add_vec:
2910    case INDEX_op_sub_vec:
2911    case INDEX_op_and_vec:
2912    case INDEX_op_andc_vec:
2913    case INDEX_op_or_vec:
2914    case INDEX_op_orc_vec:
2915    case INDEX_op_xor_vec:
2916    case INDEX_op_not_vec:
2917    case INDEX_op_shli_vec:
2918    case INDEX_op_shri_vec:
2919    case INDEX_op_sari_vec:
2920    case INDEX_op_ssadd_vec:
2921    case INDEX_op_sssub_vec:
2922    case INDEX_op_usadd_vec:
2923    case INDEX_op_ussub_vec:
2924    case INDEX_op_bitsel_vec:
2925        return 1;
2926    case INDEX_op_abs_vec:
2927    case INDEX_op_cmp_vec:
2928    case INDEX_op_mul_vec:
2929    case INDEX_op_neg_vec:
2930    case INDEX_op_smax_vec:
2931    case INDEX_op_smin_vec:
2932    case INDEX_op_umax_vec:
2933    case INDEX_op_umin_vec:
2934        return vece < MO_64;
2935    case INDEX_op_shlv_vec:
2936    case INDEX_op_shrv_vec:
2937    case INDEX_op_sarv_vec:
2938    case INDEX_op_rotli_vec:
2939    case INDEX_op_rotlv_vec:
2940    case INDEX_op_rotrv_vec:
2941        return -1;
2942    default:
2943        return 0;
2944    }
2945}
2946
2947void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2948                       TCGArg a0, ...)
2949{
2950    va_list va;
2951    TCGv_vec v0, v1, v2, t1, t2, c1;
2952    TCGArg a2;
2953
2954    va_start(va, a0);
2955    v0 = temp_tcgv_vec(arg_temp(a0));
2956    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2957    a2 = va_arg(va, TCGArg);
2958    va_end(va);
2959
2960    switch (opc) {
2961    case INDEX_op_shlv_vec:
2962        /*
2963         * Merely propagate shlv_vec to arm_ushl_vec.
2964         * In this way we don't set TCG_TARGET_HAS_shv_vec
2965         * because everything is done via expansion.
2966         */
2967        v2 = temp_tcgv_vec(arg_temp(a2));
2968        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2969                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2970        break;
2971
2972    case INDEX_op_shrv_vec:
2973    case INDEX_op_sarv_vec:
2974        /* Right shifts are negative left shifts for NEON.  */
2975        v2 = temp_tcgv_vec(arg_temp(a2));
2976        t1 = tcg_temp_new_vec(type);
2977        tcg_gen_neg_vec(vece, t1, v2);
2978        if (opc == INDEX_op_shrv_vec) {
2979            opc = INDEX_op_arm_ushl_vec;
2980        } else {
2981            opc = INDEX_op_arm_sshl_vec;
2982        }
2983        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2984                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2985        tcg_temp_free_vec(t1);
2986        break;
2987
2988    case INDEX_op_rotli_vec:
2989        t1 = tcg_temp_new_vec(type);
2990        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2991        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
2992                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2993        tcg_temp_free_vec(t1);
2994        break;
2995
2996    case INDEX_op_rotlv_vec:
2997        v2 = temp_tcgv_vec(arg_temp(a2));
2998        t1 = tcg_temp_new_vec(type);
2999        c1 = tcg_constant_vec(type, vece, 8 << vece);
3000        tcg_gen_sub_vec(vece, t1, v2, c1);
3001        /* Right shifts are negative left shifts for NEON.  */
3002        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3003                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3004        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3005                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3006        tcg_gen_or_vec(vece, v0, v0, t1);
3007        tcg_temp_free_vec(t1);
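        /*
         * The sequence above uses rotl(x, n) == (x << n) | (x >> (w - n)):
         * v2 holds n, t1 holds n - w (a negative count, i.e. a NEON
         * right shift), and the two VSHLs plus the final OR assemble
         * the rotation.
         */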
3008        break;
3009
3010    case INDEX_op_rotrv_vec:
3011        v2 = temp_tcgv_vec(arg_temp(a2));
3012        t1 = tcg_temp_new_vec(type);
3013        t2 = tcg_temp_new_vec(type);
3014        c1 = tcg_constant_vec(type, vece, 8 << vece);
3015        tcg_gen_neg_vec(vece, t1, v2);
3016        tcg_gen_sub_vec(vece, t2, c1, v2);
3017        /* Right shifts are negative left shifts for NEON.  */
3018        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3019                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3020        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3021                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3022        tcg_gen_or_vec(vece, v0, t1, t2);
3023        tcg_temp_free_vec(t1);
3024        tcg_temp_free_vec(t2);
3025        break;
3026
3027    default:
3028        g_assert_not_reached();
3029    }
3030}
3031
3032static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3033{
3034    int i;
3035    for (i = 0; i < count; ++i) {
3036        p[i] = INSN_NOP;
3037    }
3038}
3039
3040/* Compute frame size via macros, to share between tcg_target_qemu_prologue
3041   and tcg_register_jit.  */
3042
3043#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
3044
3045#define FRAME_SIZE \
3046    ((PUSH_SIZE \
3047      + TCG_STATIC_CALL_ARGS_SIZE \
3048      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3049      + TCG_TARGET_STACK_ALIGN - 1) \
3050     & -TCG_TARGET_STACK_ALIGN)
3051
3052#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
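/*
 * Worked example, assuming TCG_STATIC_CALL_ARGS_SIZE = 128,
 * CPU_TEMP_BUF_NLONGS = 128 and TCG_TARGET_STACK_ALIGN = 8 (all defined
 * elsewhere): PUSH_SIZE = 9 * 4 = 36 for r4-r11 plus lr, so FRAME_SIZE
 * = (36 + 128 + 512 + 7) & -8 = 680 and STACK_ADDEND = 644.
 */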
3053
3054static void tcg_target_qemu_prologue(TCGContext *s)
3055{
3056    /* Calling convention requires us to save r4-r11 and lr.  */
3057    /* stmdb sp!, { r4 - r11, lr } */
3058    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
3059
3060    /* Reserve callee argument and tcg temp space.  */
3061    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3062                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3063    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3064                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3065
3066    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3067
3068    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
3069
3070    /*
3071     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3072     * and fall through to the rest of the epilogue.
3073     */
3074    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3075    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3076    tcg_out_epilogue(s);
3077}
3078
3079static void tcg_out_epilogue(TCGContext *s)
3080{
3081    /* Release local stack frame.  */
3082    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3083                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3084
3085    /* ldmia sp!, { r4 - r11, pc } */
3086    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
3087}
3088
3089typedef struct {
3090    DebugFrameHeader h;
3091    uint8_t fde_def_cfa[4];
3092    uint8_t fde_reg_ofs[18];
3093} DebugFrame;
3094
3095#define ELF_HOST_MACHINE EM_ARM
3096
3097/* We're expecting a 2 byte uleb128 encoded value.  */
3098QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
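/*
 * The two fde_def_cfa immediate bytes below are FRAME_SIZE as uleb128;
 * e.g. a FRAME_SIZE of 680 encodes as (680 & 0x7f) | 0x80 = 0xa8
 * followed by 680 >> 7 = 0x05.
 */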
3099
3100static const DebugFrame debug_frame = {
3101    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3102    .h.cie.id = -1,
3103    .h.cie.version = 1,
3104    .h.cie.code_align = 1,
3105    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
3106    .h.cie.return_column = 14,
3107
3108    /* Total FDE size does not include the "len" member.  */
3109    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3110
3111    .fde_def_cfa = {
3112        12, 13,                         /* DW_CFA_def_cfa sp, ... */
3113        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3114        (FRAME_SIZE >> 7)
3115    },
3116    .fde_reg_ofs = {
3117        /* The following must match the stmdb in the prologue.  */
3118        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
3119        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
3120        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
3121        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
3122        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
3123        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
3124        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3125        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3126        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3127    }
3128};
3129
3130void tcg_register_jit(const void *buf, size_t buf_size)
3131{
3132    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3133}
3134